klaude-code 2.10.2__py3-none-any.whl → 2.10.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/auth/AGENTS.md +4 -24
- klaude_code/auth/__init__.py +1 -17
- klaude_code/cli/auth_cmd.py +3 -53
- klaude_code/cli/list_model.py +0 -50
- klaude_code/config/assets/builtin_config.yaml +7 -35
- klaude_code/config/config.py +5 -42
- klaude_code/const.py +5 -2
- klaude_code/core/agent_profile.py +2 -10
- klaude_code/core/backtrack/__init__.py +3 -0
- klaude_code/core/backtrack/manager.py +48 -0
- klaude_code/core/memory.py +25 -9
- klaude_code/core/task.py +53 -7
- klaude_code/core/tool/__init__.py +2 -0
- klaude_code/core/tool/backtrack/__init__.py +3 -0
- klaude_code/core/tool/backtrack/backtrack_tool.md +17 -0
- klaude_code/core/tool/backtrack/backtrack_tool.py +65 -0
- klaude_code/core/tool/context.py +5 -0
- klaude_code/core/turn.py +3 -0
- klaude_code/llm/anthropic/input.py +28 -4
- klaude_code/llm/input_common.py +70 -1
- klaude_code/llm/openai_compatible/input.py +5 -2
- klaude_code/llm/openrouter/input.py +5 -2
- klaude_code/llm/registry.py +0 -1
- klaude_code/protocol/events.py +10 -0
- klaude_code/protocol/llm_param.py +0 -1
- klaude_code/protocol/message.py +10 -1
- klaude_code/protocol/tools.py +1 -0
- klaude_code/session/session.py +111 -2
- klaude_code/session/store.py +2 -0
- klaude_code/skill/assets/executing-plans/SKILL.md +84 -0
- klaude_code/skill/assets/writing-plans/SKILL.md +116 -0
- klaude_code/tui/commands.py +15 -0
- klaude_code/tui/components/developer.py +1 -1
- klaude_code/tui/components/errors.py +2 -4
- klaude_code/tui/components/metadata.py +5 -10
- klaude_code/tui/components/rich/markdown.py +5 -1
- klaude_code/tui/components/rich/status.py +7 -76
- klaude_code/tui/components/rich/theme.py +12 -2
- klaude_code/tui/components/tools.py +31 -18
- klaude_code/tui/components/user_input.py +1 -1
- klaude_code/tui/display.py +4 -0
- klaude_code/tui/input/completers.py +51 -17
- klaude_code/tui/input/images.py +127 -0
- klaude_code/tui/input/prompt_toolkit.py +16 -2
- klaude_code/tui/machine.py +26 -8
- klaude_code/tui/renderer.py +97 -0
- klaude_code/tui/runner.py +7 -2
- klaude_code/tui/terminal/image.py +28 -12
- klaude_code/ui/terminal/title.py +8 -3
- {klaude_code-2.10.2.dist-info → klaude_code-2.10.4.dist-info}/METADATA +1 -1
- {klaude_code-2.10.2.dist-info → klaude_code-2.10.4.dist-info}/RECORD +53 -56
- klaude_code/auth/antigravity/__init__.py +0 -20
- klaude_code/auth/antigravity/exceptions.py +0 -17
- klaude_code/auth/antigravity/oauth.py +0 -315
- klaude_code/auth/antigravity/pkce.py +0 -25
- klaude_code/auth/antigravity/token_manager.py +0 -27
- klaude_code/core/prompts/prompt-antigravity.md +0 -80
- klaude_code/llm/antigravity/__init__.py +0 -3
- klaude_code/llm/antigravity/client.py +0 -558
- klaude_code/llm/antigravity/input.py +0 -268
- klaude_code/skill/assets/create-plan/SKILL.md +0 -74
- {klaude_code-2.10.2.dist-info → klaude_code-2.10.4.dist-info}/WHEEL +0 -0
- {klaude_code-2.10.2.dist-info → klaude_code-2.10.4.dist-info}/entry_points.txt +0 -0
klaude_code/session/session.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import json
+import re
 import time
 import uuid
 from collections.abc import Iterable, Sequence
@@ -15,6 +16,15 @@ from klaude_code.session.store import JsonlSessionStore, build_meta_snapshot
 
 _DEFAULT_STORES: dict[str, JsonlSessionStore] = {}
 
+_CHECKPOINT_RE = re.compile(r"<system>Checkpoint (\d+)</system>")
+
+
+def _extract_checkpoint_id(text: str) -> int | None:
+    match = _CHECKPOINT_RE.search(text)
+    if match is None:
+        return None
+    return int(match.group(1))
+
 
 def _read_json_dict(path: Path) -> dict[str, Any] | None:
     try:
@@ -51,6 +61,8 @@ class Session(BaseModel):
     todos: list[model.TodoItem] = Field(default_factory=list)  # pyright: ignore[reportUnknownVariableType]
     model_name: str | None = None
 
+    next_checkpoint_id: int = 0
+
     model_config_name: str | None = None
     model_thinking: llm_param.Thinking | None = None
     created_at: float = Field(default_factory=lambda: time.time())
@@ -153,6 +165,8 @@ class Session(BaseModel):
         model_name = raw.get("model_name") if isinstance(raw.get("model_name"), str) else None
         model_config_name = raw.get("model_config_name") if isinstance(raw.get("model_config_name"), str) else None
 
+        next_checkpoint_id = int(raw.get("next_checkpoint_id", 0))
+
         model_thinking_raw = raw.get("model_thinking")
         model_thinking = (
             llm_param.Thinking.model_validate(model_thinking_raw) if isinstance(model_thinking_raw, dict) else None
@@ -169,6 +183,7 @@ class Session(BaseModel):
             model_name=model_name,
             model_config_name=model_config_name,
             model_thinking=model_thinking,
+            next_checkpoint_id=next_checkpoint_id,
         )
         session._store = store
         return session
@@ -221,19 +236,103 @@ class Session(BaseModel):
             model_name=self.model_name,
             model_config_name=self.model_config_name,
             model_thinking=self.model_thinking,
+            next_checkpoint_id=self.next_checkpoint_id,
         )
         self._store.append_and_flush(session_id=self.id, items=items, meta=meta)
 
+    @property
+    def n_checkpoints(self) -> int:
+        return self.next_checkpoint_id
+
+    def create_checkpoint(self) -> int:
+        checkpoint_id = self.next_checkpoint_id
+        self.next_checkpoint_id += 1
+        checkpoint_msg = message.DeveloperMessage(
+            parts=[message.TextPart(text=f"<system>Checkpoint {checkpoint_id}</system>")]
+        )
+        self.append_history([checkpoint_msg])
+        return checkpoint_id
+
+    def find_checkpoint_index(self, checkpoint_id: int) -> int | None:
+        target_text = f"<system>Checkpoint {checkpoint_id}</system>"
+        for i, item in enumerate(self.conversation_history):
+            if not isinstance(item, message.DeveloperMessage):
+                continue
+            text = message.join_text_parts(item.parts)
+            if target_text in text:
+                return i
+        return None
+
+    def get_user_message_before_checkpoint(self, checkpoint_id: int) -> str | None:
+        checkpoint_idx = self.find_checkpoint_index(checkpoint_id)
+        if checkpoint_idx is None:
+            return None
+
+        for i in range(checkpoint_idx - 1, -1, -1):
+            item = self.conversation_history[i]
+            if isinstance(item, message.UserMessage):
+                return message.join_text_parts(item.parts)
+        return None
+
+    def get_checkpoint_user_messages(self) -> dict[int, str]:
+        checkpoints: dict[int, str] = {}
+        last_user_message = ""
+        for item in self.conversation_history:
+            if isinstance(item, message.UserMessage):
+                last_user_message = message.join_text_parts(item.parts)
+                continue
+            if not isinstance(item, message.DeveloperMessage):
+                continue
+            text = message.join_text_parts(item.parts)
+            checkpoint_id = _extract_checkpoint_id(text)
+            if checkpoint_id is None:
+                continue
+            checkpoints[checkpoint_id] = last_user_message
+        return checkpoints
+
+    def revert_to_checkpoint(self, checkpoint_id: int, note: str, rationale: str) -> message.BacktrackEntry:
+        target_idx = self.find_checkpoint_index(checkpoint_id)
+        if target_idx is None:
+            raise ValueError(f"Checkpoint {checkpoint_id} not found")
+
+        user_message = self.get_user_message_before_checkpoint(checkpoint_id) or ""
+        reverted_from = len(self.conversation_history)
+        entry = message.BacktrackEntry(
+            checkpoint_id=checkpoint_id,
+            note=note,
+            rationale=rationale,
+            reverted_from_index=reverted_from,
+            original_user_message=user_message,
+        )
+
+        self.conversation_history = self.conversation_history[: target_idx + 1]
+        self.next_checkpoint_id = checkpoint_id + 1
+        self._invalidate_messages_count_cache()
+        self._user_messages_cache = None
+        return entry
+
     def get_llm_history(self) -> list[message.HistoryEvent]:
         """Return the LLM-facing history view with compaction summary injected."""
         history = self.conversation_history
+
+        def _convert(item: message.HistoryEvent) -> message.HistoryEvent:
+            if isinstance(item, message.BacktrackEntry):
+                return message.DeveloperMessage(
+                    parts=[
+                        message.TextPart(
+                            text=f"<system>After this, some operations were performed and context was refined via Backtrack. Rationale: {item.rationale}. Summary: {item.note}. Please continue.</system>"
+                        )
+                    ]
+                )
+            return item
+
         last_compaction: message.CompactionEntry | None = None
         for item in reversed(history):
             if isinstance(item, message.CompactionEntry):
                 last_compaction = item
                 break
         if last_compaction is None:
-            return [it for it in history if not isinstance(it, message.CompactionEntry)]
+            return [_convert(it) for it in history if not isinstance(it, message.CompactionEntry)]
 
         summary_message = message.UserMessage(parts=[message.TextPart(text=last_compaction.summary)])
         kept = [it for it in history[last_compaction.first_kept_index :] if not isinstance(it, message.CompactionEntry)]
@@ -246,7 +345,7 @@ class Session(BaseModel):
             first_non_tool += 1
         kept = kept[first_non_tool:]
 
-        return [summary_message, *kept]
+        return [summary_message, *[_convert(it) for it in kept]]
 
     def fork(self, *, new_id: str | None = None, until_index: int | None = None) -> Session:
         """Create a new session as a fork of the current session.
@@ -266,6 +365,7 @@ class Session(BaseModel):
         forked.model_name = self.model_name
         forked.model_config_name = self.model_config_name
         forked.model_thinking = self.model_thinking.model_copy(deep=True) if self.model_thinking is not None else None
+        forked.next_checkpoint_id = self.next_checkpoint_id
         forked.file_tracker = {k: v.model_copy(deep=True) for k, v in self.file_tracker.items()}
         forked.todos = [todo.model_copy(deep=True) for todo in self.todos]
 
@@ -437,6 +537,15 @@ class Session(BaseModel):
                     yield events.DeveloperMessageEvent(session_id=self.id, item=dm)
                 case message.StreamErrorItem() as se:
                     yield events.ErrorEvent(error_message=se.error, can_retry=False, session_id=self.id)
+                case message.BacktrackEntry() as be:
+                    yield events.BacktrackEvent(
+                        session_id=self.id,
+                        checkpoint_id=be.checkpoint_id,
+                        note=be.note,
+                        rationale=be.rationale,
+                        original_user_message=be.original_user_message,
+                        messages_discarded=None,
+                    )
                 case message.CompactionEntry() as ce:
                     yield events.CompactionStartEvent(session_id=self.id, reason="threshold")
                     yield events.CompactionEndEvent(
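The checkpoint/backtrack mechanism above stores each checkpoint as an ordinary developer message carrying a `<system>Checkpoint N</system>` marker, and reverting simply truncates `conversation_history` just past the matching marker. The sketch below illustrates those two rules with plain strings standing in for the real `DeveloperMessage`/`UserMessage` objects; the regex, the marker format, and the truncation rule come from the diff, everything else is illustrative.

```python
import re

# Marker format written by Session.create_checkpoint() in the diff above.
_CHECKPOINT_RE = re.compile(r"<system>Checkpoint (\d+)</system>")


def extract_checkpoint_id(text: str) -> int | None:
    """Return the checkpoint id embedded in a marker, or None if there is none."""
    match = _CHECKPOINT_RE.search(text)
    return int(match.group(1)) if match else None


def revert_to_checkpoint(history: list[str], checkpoint_id: int) -> list[str]:
    """Keep everything up to and including the matching checkpoint marker."""
    for i, item in enumerate(history):
        if extract_checkpoint_id(item) == checkpoint_id:
            return history[: i + 1]
    raise ValueError(f"Checkpoint {checkpoint_id} not found")


# Hypothetical history, using strings instead of the real message objects.
history = [
    "user: add a login page",
    "<system>Checkpoint 0</system>",
    "assistant: edited login.py",
    "user: switch to OAuth",
    "<system>Checkpoint 1</system>",
    "assistant: edited oauth.py",
]

assert extract_checkpoint_id("<system>Checkpoint 7</system>") == 7
assert extract_checkpoint_id("no marker here") is None
assert revert_to_checkpoint(history, 0) == history[:2]
```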
klaude_code/session/store.py
CHANGED
@@ -169,6 +169,7 @@ def build_meta_snapshot(
     model_name: str | None,
     model_config_name: str | None,
     model_thinking: llm_param.Thinking | None,
+    next_checkpoint_id: int = 0,
 ) -> dict[str, Any]:
     return {
         "id": session_id,
@@ -186,4 +187,5 @@
         "model_thinking": model_thinking.model_dump(mode="json", exclude_defaults=True, exclude_none=True)
         if model_thinking
         else None,
+        "next_checkpoint_id": next_checkpoint_id,
     }
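The new `next_checkpoint_id` field is persisted in the session meta snapshot and defaults to 0, so metadata written by older releases still loads. A minimal sketch of that round-trip, using only the keys visible in this diff:

```python
from typing import Any


def load_next_checkpoint_id(raw_meta: dict[str, Any]) -> int:
    # Mirrors the Session loading code in the diff above: metadata written
    # before this release has no "next_checkpoint_id" key, so fall back to 0.
    return int(raw_meta.get("next_checkpoint_id", 0))


assert load_next_checkpoint_id({"id": "abc123"}) == 0
assert load_next_checkpoint_id({"id": "abc123", "next_checkpoint_id": 3}) == 3
```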
klaude_code/skill/assets/executing-plans/SKILL.md
ADDED

@@ -0,0 +1,84 @@
+---
+name: executing-plans
+description: Use when you have a written implementation plan to execute in a separate session with review checkpoints
+---
+
+# Executing Plans
+
+## Overview
+
+Load plan, review critically, execute tasks in batches, report for review between batches.
+
+**Core principle:** Batch execution with checkpoints for architect review.
+
+**Announce at start:** "I'm using the executing-plans skill to implement this plan."
+
+## The Process
+
+### Step 1: Load and Review Plan
+1. Read plan file
+2. Review critically - identify any questions or concerns about the plan
+3. If concerns: Raise them with your human partner before starting
+4. If no concerns: Create TodoWrite and proceed
+
+### Step 2: Execute Batch
+**Default: First 3 tasks**
+
+For each task:
+1. Mark as in_progress
+2. Follow each step exactly (plan has bite-sized steps)
+3. Run verifications as specified
+4. Mark as completed
+
+### Step 3: Report
+When batch complete:
+- Show what was implemented
+- Show verification output
+- Say: "Ready for feedback."
+
+### Step 4: Continue
+Based on feedback:
+- Apply changes if needed
+- Execute next batch
+- Repeat until complete
+
+### Step 5: Complete Development
+
+After all tasks complete and verified:
+- Announce: "I'm using the finishing-a-development-branch skill to complete this work."
+- **REQUIRED SUB-SKILL:** Use superpowers:finishing-a-development-branch
+- Follow that skill to verify tests, present options, execute choice
+
+## When to Stop and Ask for Help
+
+**STOP executing immediately when:**
+- Hit a blocker mid-batch (missing dependency, test fails, instruction unclear)
+- Plan has critical gaps preventing starting
+- You don't understand an instruction
+- Verification fails repeatedly
+
+**Ask for clarification rather than guessing.**
+
+## When to Revisit Earlier Steps
+
+**Return to Review (Step 1) when:**
+- Partner updates the plan based on your feedback
+- Fundamental approach needs rethinking
+
+**Don't force through blockers** - stop and ask.
+
+## Remember
+- Review plan critically first
+- Follow plan steps exactly
+- Don't skip verifications
+- Reference skills when plan says to
+- Between batches: just report and wait
+- Stop when blocked, don't guess
+- Never start implementation on main/master branch without explicit user consent
+
+## Integration
+
+**Required workflow skills:**
+- **superpowers:using-git-worktrees** - REQUIRED: Set up isolated workspace before starting
+- **superpowers:writing-plans** - Creates the plan this skill executes
+- **superpowers:finishing-a-development-branch** - Complete development after all tasks
klaude_code/skill/assets/writing-plans/SKILL.md
ADDED

@@ -0,0 +1,116 @@
+---
+name: writing-plans
+description: Use when you have a spec or requirements for a multi-step task, before touching code
+---
+
+# Writing Plans
+
+## Overview
+
+Write comprehensive implementation plans assuming the engineer has zero context for our codebase and questionable taste. Document everything they need to know: which files to touch for each task, code, testing, docs they might need to check, how to test it. Give them the whole plan as bite-sized tasks. DRY. YAGNI. TDD. Frequent commits.
+
+Assume they are a skilled developer, but know almost nothing about our toolset or problem domain. Assume they don't know good test design very well.
+
+**Announce at start:** "I'm using the writing-plans skill to create the implementation plan."
+
+**Context:** This should be run in a dedicated worktree (created by brainstorming skill).
+
+**Save plans to:** `docs/plans/YYYY-MM-DD-<feature-name>.md`
+
+## Bite-Sized Task Granularity
+
+**Each step is one action (2-5 minutes):**
+- "Write the failing test" - step
+- "Run it to make sure it fails" - step
+- "Implement the minimal code to make the test pass" - step
+- "Run the tests and make sure they pass" - step
+- "Commit" - step
+
+## Plan Document Header
+
+**Every plan MUST start with this header:**
+
+```markdown
+# [Feature Name] Implementation Plan
+
+> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
+
+**Goal:** [One sentence describing what this builds]
+
+**Architecture:** [2-3 sentences about approach]
+
+**Tech Stack:** [Key technologies/libraries]
+
+---
+```
+
+## Task Structure
+
+```markdown
+### Task N: [Component Name]
+
+**Files:**
+- Create: `exact/path/to/file.py`
+- Modify: `exact/path/to/existing.py:123-145`
+- Test: `tests/exact/path/to/test.py`
+
+**Step 1: Write the failing test**
+
+```python
+def test_specific_behavior():
+    result = function(input)
+    assert result == expected
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `pytest tests/path/test.py::test_name -v`
+Expected: FAIL with "function not defined"
+
+**Step 3: Write minimal implementation**
+
+```python
+def function(input):
+    return expected
+```
+
+**Step 4: Run test to verify it passes**
+
+Run: `pytest tests/path/test.py::test_name -v`
+Expected: PASS
+
+**Step 5: Commit**
+
+```bash
+git add tests/path/test.py src/path/file.py
+git commit -m "feat: add specific feature"
+```
+```
+
+## Remember
+- Exact file paths always
+- Complete code in plan (not "add validation")
+- Exact commands with expected output
+- Reference relevant skills with @ syntax
+- DRY, YAGNI, TDD, frequent commits
+
+## Execution Handoff
+
+After saving the plan, offer execution choice:
+
+**"Plan complete and saved to `docs/plans/<filename>.md`. Two execution options:**
+
+**1. Subagent-Driven (this session)** - I dispatch fresh subagent per task, review between tasks, fast iteration
+
+**2. Parallel Session (separate)** - Open new session with executing-plans, batch execution with checkpoints
+
+**Which approach?"**
+
+**If Subagent-Driven chosen:**
+- **REQUIRED SUB-SKILL:** Use superpowers:subagent-driven-development
+- Stay in this session
+- Fresh subagent per task + code review
+
+**If Parallel Session chosen:**
+- Guide them to open new session in worktree
+- **REQUIRED SUB-SKILL:** New session uses superpowers:executing-plans
klaude_code/tui/commands.py
CHANGED
@@ -183,3 +183,18 @@ class TaskClockClear(RenderCommand):
 class RenderCompactionSummary(RenderCommand):
     summary: str
     kept_items_brief: tuple[tuple[str, int, str], ...] = ()  # (item_type, count, preview)
+
+
+@dataclass(frozen=True, slots=True)
+class RenderBacktrack(RenderCommand):
+    checkpoint_id: int
+    note: str
+    rationale: str
+    original_user_message: str
+    messages_discarded: int | None = None
+
+
+@dataclass(frozen=True, slots=True)
+class UpdateTerminalTitlePrefix(RenderCommand):
+    prefix: str | None
+    model_name: str | None
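`RenderBacktrack` carries the same fields as the new `BacktrackEvent`, so the renderer gets everything it needs to display a revert. Below is a small construction sketch; the `RenderCommand` base class here is a bare stand-in for the real one defined earlier in `commands.py` (not shown in this diff), and the field values are made up.

```python
from dataclasses import dataclass


@dataclass(frozen=True, slots=True)
class RenderCommand:
    """Stand-in for the real RenderCommand base class in commands.py."""


@dataclass(frozen=True, slots=True)
class RenderBacktrack(RenderCommand):
    checkpoint_id: int
    note: str
    rationale: str
    original_user_message: str
    messages_discarded: int | None = None


# Example command as the TUI machine might emit it for a BacktrackEvent.
cmd = RenderBacktrack(
    checkpoint_id=1,
    note="Dropped the abandoned OAuth refactor",
    rationale="Approach changed after review",
    original_user_message="switch to OAuth",
)
print(cmd)
```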
klaude_code/tui/components/developer.py
CHANGED

@@ -6,7 +6,7 @@ from klaude_code.tui.components.common import create_grid
 from klaude_code.tui.components.rich.theme import ThemeKey
 from klaude_code.tui.components.tools import render_path
 
-REMINDER_BULLET = "
+REMINDER_BULLET = "+"
 
 
 def need_render_developer_message(e: events.DeveloperMessageEvent) -> bool:
klaude_code/tui/components/errors.py
CHANGED

@@ -9,8 +9,7 @@ def render_error(error_msg: Text) -> RenderableType:
     """Render error with X mark for error events."""
     grid = create_grid()
     error_msg.style = ThemeKey.ERROR
-    error_msg.overflow = "
-    error_msg.no_wrap = True
+    error_msg.overflow = "fold"
     grid.add_row(Text("✘", style=ThemeKey.ERROR_BOLD), error_msg)
     return grid
 
@@ -19,7 +18,6 @@ def render_tool_error(error_msg: Text) -> RenderableType:
     """Render error with indent for tool results."""
     grid = create_grid()
     error_msg.style = ThemeKey.ERROR
-    error_msg.overflow = "
-    error_msg.no_wrap = True
+    error_msg.overflow = "fold"
     grid.add_row(Text(" "), error_msg)
     return grid
klaude_code/tui/components/metadata.py
CHANGED

@@ -32,9 +32,6 @@ def _render_task_metadata_block(
 
     # Second column: provider/model description / tokens / cost / …
     content = Text()
-    if metadata.provider is not None:
-        content.append_text(Text(metadata.provider.lower().replace(" ", "-"), style=ThemeKey.METADATA))
-        content.append_text(Text("/", style=ThemeKey.METADATA))
     content.append_text(Text(metadata.model_name, style=ThemeKey.METADATA))
     if metadata.description:
         content.append_text(Text(" ", style=ThemeKey.METADATA)).append_text(
@@ -129,7 +126,7 @@ def _render_task_metadata_block(
 
     if parts:
         content.append_text(Text(" ", style=ThemeKey.METADATA))
-        content.append_text(Text(" ", style=ThemeKey.
+        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM).join(parts))
 
     grid.add_row(mark, content)
     return grid
@@ -140,15 +137,14 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
     renderables: list[RenderableType] = []
 
     has_sub_agents = len(e.metadata.sub_agent_task_metadata) > 0
-
-    main_mark_text = "●"
+    main_mark_text = "•"
     main_mark = Text(main_mark_text, style=ThemeKey.METADATA)
 
     renderables.append(_render_task_metadata_block(e.metadata.main_agent, mark=main_mark, show_context_and_time=True))
 
     # Render each sub-agent metadata block
     for meta in e.metadata.sub_agent_task_metadata:
-        sub_mark = Text("
+        sub_mark = Text(" •", style=ThemeKey.METADATA)
         renderables.append(_render_task_metadata_block(meta, mark=sub_mark, show_context_and_time=True))
 
     # Add total cost line when there are sub-agents
@@ -165,9 +161,8 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
 
         currency_symbol = "¥" if currency == "CNY" else "$"
         total_line = Text.assemble(
-            ("
-            ("
-            ("total ", ThemeKey.METADATA),
+            (" •", ThemeKey.METADATA),
+            (" total ", ThemeKey.METADATA),
             (currency_symbol, ThemeKey.METADATA),
             (f"{total_cost:.4f}", ThemeKey.METADATA),
         )
klaude_code/tui/components/rich/markdown.py
CHANGED

@@ -609,7 +609,11 @@ class MarkdownStream:
 
         live_text_to_set: Text | None = None
         if not final and MARKDOWN_STREAM_LIVE_REPAINT_ENABLED and self._live_sink is not None:
-            # Only update live area after we have rendered at least one stable block
+            # Only update the live area after we have rendered at least one stable block.
+            #
+            # This keeps the bottom "live" region anchored to stable scrollback, and
+            # avoids showing a live frame that would later need to be retroactively
+            # re-rendered once stable content exists.
             if not self._stable_rendered_lines:
                 return
             # When nothing is stable yet, we still want to show incremental output.
klaude_code/tui/components/rich/status.py
CHANGED

@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import contextlib
 import math
-import random
 import time
 from collections.abc import Callable
 
@@ -21,23 +20,14 @@ from klaude_code.const import (
     STATUS_HINT_TEXT,
     STATUS_SHIMMER_ALPHA_SCALE,
     STATUS_SHIMMER_BAND_HALF_WIDTH,
+    STATUS_SHIMMER_ENABLED,
     STATUS_SHIMMER_PADDING,
 )
 from klaude_code.tui.components.rich.theme import ThemeKey
-from klaude_code.tui.terminal.color import get_last_terminal_background_rgb
 
 # Use an existing Rich spinner name; BreathingSpinner overrides its rendering
 BREATHING_SPINNER_NAME = "dots"
 
-# Alternating glyphs for the breathing spinner - switches at each "transparent" point
-_BREATHING_SPINNER_GLYPHS_BASE = [
-    "✦",
-]
-
-# Shuffle glyphs on module load for variety across sessions
-BREATHING_SPINNER_GLYPHS = _BREATHING_SPINNER_GLYPHS_BASE.copy()
-random.shuffle(BREATHING_SPINNER_GLYPHS)
-
 
 _process_start: float | None = None
 _task_start: float | None = None
@@ -158,6 +148,9 @@ def _shimmer_profile(main_text: str) -> list[tuple[str, float]]:
     if not chars:
         return []
 
+    if not STATUS_SHIMMER_ENABLED:
+        return [(ch, 0.0) for ch in chars]
+
     padding = STATUS_SHIMMER_PADDING
     char_count = len(chars)
     period = char_count + padding * 2
@@ -211,58 +204,6 @@ def _shimmer_style(console: Console, base_style: Style, intensity: float) -> Style:
     return base_style + Style(color=shimmer_color)
 
 
-def _breathing_intensity() -> float:
-    """Compute breathing intensity in [0, 1] for the spinner.
-
-    Intensity follows a smooth cosine curve over the configured period, starting
-    from 0 (fully blended into background), rising to 1 (full style color),
-    then returning to 0, giving a subtle "breathing" effect.
-    """
-
-    period = max(SPINNER_BREATH_PERIOD_SECONDS, 0.1)
-    elapsed = _elapsed_since_start()
-    phase = (elapsed % period) / period
-    return 0.5 * (1.0 - math.cos(2.0 * math.pi * phase))
-
-
-def _breathing_glyph() -> str:
-    """Get the current glyph for the breathing spinner.
-
-    Alternates between glyphs at each breath cycle (when intensity reaches 0).
-    """
-    period = max(SPINNER_BREATH_PERIOD_SECONDS, 0.1)
-    elapsed = _elapsed_since_start()
-    cycle = int(elapsed / period)
-    return BREATHING_SPINNER_GLYPHS[cycle % len(BREATHING_SPINNER_GLYPHS)]
-
-
-def _breathing_style(console: Console, base_style: Style, intensity: float) -> Style:
-    """Blend a base style's foreground color toward terminal background.
-
-    When intensity is 0, the color matches the background (effectively
-    "transparent"); when intensity is 1, the color is the base style color.
-    """
-
-    base_color = base_style.color or Color.default()
-    base_triplet = base_color.get_truecolor()
-    base_r, base_g, base_b = base_triplet
-
-    cached_bg = get_last_terminal_background_rgb()
-    if cached_bg is not None:
-        bg_r, bg_g, bg_b = cached_bg
-    else:
-        bg_triplet = Color.default().get_truecolor(foreground=False)
-        bg_r, bg_g, bg_b = bg_triplet
-
-    intensity_clamped = max(0.0, min(1.0, intensity))
-    r = int(bg_r * (1.0 - intensity_clamped) + base_r * intensity_clamped)
-    g = int(bg_g * (1.0 - intensity_clamped) + base_g * intensity_clamped)
-    b = int(bg_b * (1.0 - intensity_clamped) + base_b * intensity_clamped)
-
-    breathing_color = Color.from_rgb(r, g, b)
-    return base_style + Style(color=breathing_color)
-
-
 def truncate_left(text: Text, max_cells: int, *, console: Console, ellipsis: str = "…") -> Text:
     """Left-truncate Text to fit within max_cells.
 
@@ -409,21 +350,11 @@ class BreathingSpinner(RichSpinner):
         return console.get_style(style_name)
 
     def _render_breathing(self, console: Console) -> RenderableType:
-        base_style = self._resolve_base_style(console)
-        intensity = _breathing_intensity()
-        style = _breathing_style(console, base_style, intensity)
-
-        glyph = _breathing_glyph()
-        frame = Text(glyph, style=style)
-
         if not self.text:
-            return
+            return Text()
         if isinstance(self.text, (str, Text)):
-            return
-
-        table = Table.grid(padding=1)
-        table.add_row(frame, self.text)
-        return table
+            return self.text if isinstance(self.text, Text) else Text(self.text)
+        return self.text
 
 
 # Monkey-patch Rich's Status module to use the breathing spinner implementation
|
|
@@ -189,6 +189,11 @@ class ThemeKey(str, Enum):
|
|
|
189
189
|
THINKING_BOLD = "thinking.bold"
|
|
190
190
|
# COMPACTION
|
|
191
191
|
COMPACTION_SUMMARY = "compaction.summary"
|
|
192
|
+
# BACKTRACK
|
|
193
|
+
BACKTRACK = "backtrack"
|
|
194
|
+
BACKTRACK_INFO = "backtrack.info"
|
|
195
|
+
BACKTRACK_USER_MESSAGE = "backtrack.user_message"
|
|
196
|
+
BACKTRACK_NOTE = "backtrack.note"
|
|
192
197
|
# TODO_ITEM
|
|
193
198
|
TODO_EXPLANATION = "todo.explanation"
|
|
194
199
|
TODO_PENDING_MARK = "todo.pending.mark"
|
|
@@ -262,8 +267,8 @@ def get_theme(theme: str | None = None) -> Themes:
|
|
|
262
267
|
ThemeKey.ERROR_DIM.value: "dim " + palette.red,
|
|
263
268
|
ThemeKey.INTERRUPT.value: palette.red,
|
|
264
269
|
# USER_INPUT
|
|
265
|
-
ThemeKey.USER_INPUT.value: f"{palette.
|
|
266
|
-
ThemeKey.USER_INPUT_PROMPT.value: f"bold {palette.
|
|
270
|
+
ThemeKey.USER_INPUT.value: f"{palette.cyan} on {palette.user_message_background}",
|
|
271
|
+
ThemeKey.USER_INPUT_PROMPT.value: f"bold {palette.cyan} on {palette.user_message_background}",
|
|
267
272
|
ThemeKey.USER_INPUT_AT_PATTERN.value: f"{palette.purple} on {palette.user_message_background}",
|
|
268
273
|
ThemeKey.USER_INPUT_SLASH_COMMAND.value: f"bold {palette.blue} on {palette.user_message_background}",
|
|
269
274
|
ThemeKey.USER_INPUT_SKILL.value: f"bold {palette.green} on {palette.user_message_background}",
|
|
@@ -311,6 +316,11 @@ def get_theme(theme: str | None = None) -> Themes:
|
|
|
311
316
|
ThemeKey.THINKING_BOLD.value: "italic " + palette.grey1,
|
|
312
317
|
# COMPACTION
|
|
313
318
|
ThemeKey.COMPACTION_SUMMARY.value: palette.grey1,
|
|
319
|
+
# BACKTRACK
|
|
320
|
+
ThemeKey.BACKTRACK.value: palette.orange,
|
|
321
|
+
ThemeKey.BACKTRACK_INFO.value: "dim " + palette.grey2,
|
|
322
|
+
ThemeKey.BACKTRACK_USER_MESSAGE.value: palette.cyan,
|
|
323
|
+
ThemeKey.BACKTRACK_NOTE.value: palette.grey1,
|
|
314
324
|
# TODO_ITEM
|
|
315
325
|
ThemeKey.TODO_EXPLANATION.value: palette.grey1 + " italic",
|
|
316
326
|
ThemeKey.TODO_PENDING_MARK.value: "bold " + palette.grey1,
|