klaude-code 1.2.7__py3-none-any.whl → 1.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. klaude_code/auth/codex/__init__.py +1 -1
  2. klaude_code/command/__init__.py +2 -0
  3. klaude_code/command/prompt-deslop.md +14 -0
  4. klaude_code/command/release_notes_cmd.py +86 -0
  5. klaude_code/command/status_cmd.py +92 -54
  6. klaude_code/core/agent.py +13 -19
  7. klaude_code/core/manager/sub_agent_manager.py +5 -1
  8. klaude_code/core/prompt.py +38 -28
  9. klaude_code/core/reminders.py +4 -4
  10. klaude_code/core/task.py +60 -45
  11. klaude_code/core/tool/__init__.py +2 -0
  12. klaude_code/core/tool/file/apply_patch_tool.py +1 -1
  13. klaude_code/core/tool/file/edit_tool.py +1 -1
  14. klaude_code/core/tool/file/multi_edit_tool.py +1 -1
  15. klaude_code/core/tool/file/write_tool.py +1 -1
  16. klaude_code/core/tool/memory/memory_tool.py +2 -2
  17. klaude_code/core/tool/sub_agent_tool.py +2 -1
  18. klaude_code/core/tool/todo/todo_write_tool.py +1 -1
  19. klaude_code/core/tool/todo/update_plan_tool.py +1 -1
  20. klaude_code/core/tool/tool_context.py +21 -4
  21. klaude_code/core/tool/tool_runner.py +5 -8
  22. klaude_code/core/tool/web/mermaid_tool.py +1 -4
  23. klaude_code/core/turn.py +90 -62
  24. klaude_code/llm/anthropic/client.py +15 -46
  25. klaude_code/llm/client.py +1 -1
  26. klaude_code/llm/codex/client.py +44 -30
  27. klaude_code/llm/input_common.py +0 -6
  28. klaude_code/llm/openai_compatible/client.py +29 -73
  29. klaude_code/llm/openai_compatible/input.py +6 -4
  30. klaude_code/llm/openai_compatible/stream_processor.py +82 -0
  31. klaude_code/llm/openrouter/client.py +29 -59
  32. klaude_code/llm/openrouter/input.py +4 -27
  33. klaude_code/llm/responses/client.py +49 -79
  34. klaude_code/llm/usage.py +51 -10
  35. klaude_code/protocol/commands.py +1 -0
  36. klaude_code/protocol/events.py +12 -2
  37. klaude_code/protocol/model.py +142 -26
  38. klaude_code/protocol/sub_agent.py +5 -1
  39. klaude_code/session/export.py +51 -27
  40. klaude_code/session/session.py +33 -16
  41. klaude_code/session/templates/export_session.html +4 -1
  42. klaude_code/ui/modes/repl/__init__.py +1 -5
  43. klaude_code/ui/modes/repl/event_handler.py +153 -54
  44. klaude_code/ui/modes/repl/renderer.py +6 -4
  45. klaude_code/ui/renderers/developer.py +35 -25
  46. klaude_code/ui/renderers/metadata.py +68 -30
  47. klaude_code/ui/renderers/tools.py +53 -87
  48. klaude_code/ui/rich/markdown.py +5 -5
  49. {klaude_code-1.2.7.dist-info → klaude_code-1.2.9.dist-info}/METADATA +1 -1
  50. {klaude_code-1.2.7.dist-info → klaude_code-1.2.9.dist-info}/RECORD +52 -49
  51. {klaude_code-1.2.7.dist-info → klaude_code-1.2.9.dist-info}/WHEEL +0 -0
  52. {klaude_code-1.2.7.dist-info → klaude_code-1.2.9.dist-info}/entry_points.txt +0 -0
klaude_code/auth/codex/__init__.py CHANGED
@@ -17,4 +17,4 @@ __all__ = [
     "CodexOAuthError",
     "CodexTokenExpiredError",
     "CodexTokenManager",
-]
+]
klaude_code/command/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .registry import (
     load_prompt_commands,
     register_command,
 )
+from .release_notes_cmd import ReleaseNotesCommand
 from .status_cmd import StatusCommand
 from .terminal_setup_cmd import TerminalSetupCommand
 
@@ -29,6 +30,7 @@ __all__ = [
     "ModelCommand",
     "ExportCommand",
     "RefreshTerminalCommand",
+    "ReleaseNotesCommand",
     "StatusCommand",
     "TerminalSetupCommand",
     "register_command",
klaude_code/command/prompt-deslop.md ADDED
@@ -0,0 +1,14 @@
+---
+description: Remove AI code slop
+from: https://cursor.com/cn/link/command?name=deslop&text=%23%20Remove%20AI%20code%20slop%0A%0ACheck%20the%20diff%20against%20main%2C%20and%20remove%20all%20AI%20generated%20slop%20introduced%20in%20this%20branch.%0A%0AThis%20includes%3A%0A-%20Extra%20comments%20that%20a%20human%20wouldn%27t%20add%20or%20is%20inconsistent%20with%20the%20rest%20of%20the%20file%0A-%20Extra%20defensive%20checks%20or%20try%2Fcatch%20blocks%20that%20are%20abnormal%20for%20that%20area%20of%20the%20codebase%20(especially%20if%20called%20by%20trusted%20%2F%20validated%20codepaths)%0A-%20Casts%20to%20any%20to%20get%20around%20type%20issues%0A-%20Any%20other%20style%20that%20is%20inconsistent%20with%20the%20file%0A%0AReport%20at%20the%20end%20with%20only%20a%201-3%20sentence%20summary%20of%20what%20you%20changed
+---
+
+Check the diff against main, and remove all AI generated slop introduced in this branch.
+
+This includes:
+- Extra comments that a human wouldn't add or is inconsistent with the rest of the file
+- Extra defensive checks or try/catch blocks that are abnormal for that area of the codebase (especially if called by trusted / validated codepaths)
+- Casts to any to get around type issues
+- Any other style that is inconsistent with the file
+
+Report at the end with only a 1-3 sentence summary of what you changed
klaude_code/command/release_notes_cmd.py ADDED
@@ -0,0 +1,86 @@
+from pathlib import Path
+
+from klaude_code.command.command_abc import CommandABC, CommandResult
+from klaude_code.command.registry import register_command
+from klaude_code.core.agent import Agent
+from klaude_code.protocol import commands, events, model
+
+
+def _read_changelog() -> str:
+    """Read CHANGELOG.md from project root."""
+    changelog_path = Path(__file__).parent.parent.parent.parent / "CHANGELOG.md"
+    if not changelog_path.exists():
+        return "CHANGELOG.md not found"
+    return changelog_path.read_text(encoding="utf-8")
+
+
+def _extract_releases(changelog: str, count: int = 1) -> str:
+    """Extract release sections from changelog in reverse order (oldest first).
+
+    Args:
+        changelog: The full changelog content.
+        count: Number of releases to extract (default 1).
+
+    Returns:
+        The content of the specified number of releases, with newest at bottom.
+    """
+    lines = changelog.split("\n")
+    releases: list[list[str]] = []
+    current_release: list[str] = []
+    version_count = 0
+
+    for line in lines:
+        # Skip [Unreleased] section header
+        if line.startswith("## [Unreleased]"):
+            continue
+
+        # Check for version header (e.g., ## [1.2.8] - 2025-12-01)
+        if line.startswith("## [") and "]" in line:
+            if current_release:
+                releases.append(current_release)
+            version_count += 1
+            if version_count > count:
+                break
+            current_release = [line]
+            continue
+
+        if version_count > 0:
+            current_release.append(line)
+
+    # Append the last release if exists
+    if current_release and version_count <= count:
+        releases.append(current_release)
+
+    if not releases:
+        return "No release notes found"
+
+    # Reverse to show oldest first, newest last
+    releases.reverse()
+    return "\n".join("\n".join(release) for release in releases).strip()
+
+
+@register_command
+class ReleaseNotesCommand(CommandABC):
+    """Display the latest release notes from CHANGELOG.md."""
+
+    @property
+    def name(self) -> commands.CommandName:
+        return commands.CommandName.RELEASE_NOTES
+
+    @property
+    def summary(self) -> str:
+        return "Show the latest release notes"
+
+    async def run(self, raw: str, agent: Agent) -> CommandResult:
+        changelog = _read_changelog()
+        content = _extract_releases(changelog, count=10)
+
+        event = events.DeveloperMessageEvent(
+            session_id=agent.session.id,
+            item=model.DeveloperMessageItem(
+                content=content,
+                command_output=model.CommandOutput(command_name=self.name),
+            ),
+        )
+
+        return CommandResult(events=[event])
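
The new module wires the /release-notes command (registered above in klaude_code/command/__init__.py) to a small changelog parser. A minimal usage sketch of the extraction helper, assuming klaude-code 1.2.9 is installed and the private helper stays importable from this module; the sample changelog content is made up purely for illustration:

from klaude_code.command.release_notes_cmd import _extract_releases

# Hypothetical changelog text, illustration only.
sample = """# Changelog

## [Unreleased]
- work in progress

## [1.2.9] - 2025-12-05
### Fixed
- bar

## [1.2.8] - 2025-12-01
### Added
- foo
"""

# count=1 keeps only the newest release section (1.2.9 here).
print(_extract_releases(sample, count=1))

# count=2 returns both sections, oldest first and newest last,
# matching the "newest at bottom" behavior described in the docstring.
print(_extract_releases(sample, count=2))
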
klaude_code/command/status_cmd.py CHANGED
@@ -5,47 +5,65 @@ from klaude_code.protocol import commands, events, model
 from klaude_code.session.session import Session
 
 
-def accumulate_session_usage(session: Session) -> tuple[model.Usage, int]:
-    """Accumulate usage statistics from all ResponseMetadataItems in session history.
+class AggregatedUsage(model.BaseModel):
+    """Aggregated usage statistics including per-model breakdown."""
 
-    Returns:
-        A tuple of (accumulated_usage, task_count)
+    total: model.Usage
+    by_model: list[model.TaskMetadata]
+    task_count: int
+
+
+def accumulate_session_usage(session: Session) -> AggregatedUsage:
+    """Accumulate usage statistics from all TaskMetadataItems in session history.
+
+    Includes both main agent and sub-agent task metadata, grouped by model+provider.
     """
-    total = model.Usage()
+    all_metadata: list[model.TaskMetadata] = []
     task_count = 0
-    first_currency_set = False
 
     for item in session.conversation_history:
-        if isinstance(item, model.ResponseMetadataItem) and item.usage:
+        if isinstance(item, model.TaskMetadataItem):
             task_count += 1
-            usage = item.usage
-
-            # Set currency from first usage item
-            if not first_currency_set and usage.currency:
-                total.currency = usage.currency
-                first_currency_set = True
-
-            total.input_tokens += usage.input_tokens
-            total.cached_tokens += usage.cached_tokens
-            total.reasoning_tokens += usage.reasoning_tokens
-            total.output_tokens += usage.output_tokens
-            total.total_tokens += usage.total_tokens
-
-            # Accumulate costs
-            if usage.input_cost is not None:
-                total.input_cost = (total.input_cost or 0.0) + usage.input_cost
-            if usage.output_cost is not None:
-                total.output_cost = (total.output_cost or 0.0) + usage.output_cost
-            if usage.cache_read_cost is not None:
-                total.cache_read_cost = (total.cache_read_cost or 0.0) + usage.cache_read_cost
-            if usage.total_cost is not None:
-                total.total_cost = (total.total_cost or 0.0) + usage.total_cost
-
-            # Keep the latest context_usage_percent
-            if usage.context_usage_percent is not None:
-                total.context_usage_percent = usage.context_usage_percent
-
-    return total, task_count
+            all_metadata.append(item.main)
+            all_metadata.extend(item.sub_agent_task_metadata)
+
+    # Aggregate by model+provider
+    by_model = model.TaskMetadata.aggregate_by_model(all_metadata)
+
+    # Calculate total from aggregated results
+    total = model.Usage()
+    for meta in by_model:
+        if not meta.usage:
+            continue
+        usage = meta.usage
+
+        # Set currency from first
+        if total.currency == "USD" and usage.currency:
+            total.currency = usage.currency
+
+        # Accumulate primary token fields (total_tokens is computed)
+        total.input_tokens += usage.input_tokens
+        total.cached_tokens += usage.cached_tokens
+        total.reasoning_tokens += usage.reasoning_tokens
+        total.output_tokens += usage.output_tokens
+
+        # Accumulate cost components (total_cost is computed)
+        if usage.input_cost is not None:
+            total.input_cost = (total.input_cost or 0.0) + usage.input_cost
+        if usage.output_cost is not None:
+            total.output_cost = (total.output_cost or 0.0) + usage.output_cost
+        if usage.cache_read_cost is not None:
+            total.cache_read_cost = (total.cache_read_cost or 0.0) + usage.cache_read_cost
+
+        # Track peak context window size (max across all tasks)
+        if usage.context_window_size is not None:
+            total.context_window_size = usage.context_window_size
+
+        # Keep the latest context_limit for computed context_usage_percent
+        if usage.context_limit is not None:
+            total.context_limit = usage.context_limit
+
+    return AggregatedUsage(total=total, by_model=by_model, task_count=task_count)
 
 
 def _format_tokens(tokens: int) -> str:
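
The inline comments in the new loop ("total_tokens is computed", "total_cost is computed") indicate that the Usage model now derives its totals from the component fields, so the aggregation only sums components and leaves each cost field as None until a real value appears. A standalone sketch of that pattern, using a simplified stand-in class since the real Usage model is not part of this diff:

from dataclasses import dataclass


@dataclass
class MiniUsage:
    """Simplified stand-in for the Usage model, illustration only."""

    input_tokens: int = 0
    output_tokens: int = 0
    input_cost: float | None = None
    output_cost: float | None = None

    @property
    def total_tokens(self) -> int:
        # "total_tokens is computed": derived from components, never summed directly.
        return self.input_tokens + self.output_tokens

    @property
    def total_cost(self) -> float | None:
        # Stays None until at least one cost component is known.
        if self.input_cost is None and self.output_cost is None:
            return None
        return (self.input_cost or 0.0) + (self.output_cost or 0.0)


total = MiniUsage()
for usage in (MiniUsage(input_tokens=100, output_tokens=20, input_cost=0.01),
              MiniUsage(input_tokens=50, output_tokens=5)):
    total.input_tokens += usage.input_tokens
    total.output_tokens += usage.output_tokens
    # Same pattern as the hunk above: only add known costs, keep None otherwise.
    if usage.input_cost is not None:
        total.input_cost = (total.input_cost or 0.0) + usage.input_cost
    if usage.output_cost is not None:
        total.output_cost = (total.output_cost or 0.0) + usage.output_cost

print(total.total_tokens, total.total_cost)  # 175 0.01
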
@@ -67,20 +85,42 @@ def _format_cost(cost: float | None, currency: str = "USD") -> str:
     return f"{symbol}{cost:.2f}"
 
 
-def format_status_content(usage: model.Usage) -> str:
-    """Format session status as comma-separated text."""
-    parts: list[str] = []
+def _format_model_usage_line(meta: model.TaskMetadata) -> str:
+    """Format a single model's usage as a line."""
+    model_label = meta.model_name
+    if meta.provider:
+        model_label = f"{meta.model_name} ({meta.provider})"
+
+    usage = meta.usage
+    if not usage:
+        return f" {model_label}: no usage data"
+
+    cost_str = _format_cost(usage.total_cost, usage.currency)
+    return (
+        f" {model_label}: "
+        f"{_format_tokens(usage.input_tokens)} input, "
+        f"{_format_tokens(usage.output_tokens)} output, "
+        f"{_format_tokens(usage.cached_tokens)} cache read, "
+        f"{_format_tokens(usage.reasoning_tokens)} thinking, "
+        f"({cost_str})"
+    )
+
+
+def format_status_content(aggregated: AggregatedUsage) -> str:
+    """Format session status with per-model breakdown."""
+    lines: list[str] = []
 
-    parts.append(f"Input: {_format_tokens(usage.input_tokens)}")
-    if usage.cached_tokens > 0:
-        parts.append(f"Cached: {_format_tokens(usage.cached_tokens)}")
-    parts.append(f"Output: {_format_tokens(usage.output_tokens)}")
-    parts.append(f"Total: {_format_tokens(usage.total_tokens)}")
+    # Total cost line
+    total_cost_str = _format_cost(aggregated.total.total_cost, aggregated.total.currency)
+    lines.append(f"Total cost: {total_cost_str}")
 
-    if usage.total_cost is not None:
-        parts.append(f"Cost: {_format_cost(usage.total_cost, usage.currency)}")
+    # Per-model breakdown
+    if aggregated.by_model:
+        lines.append("Usage by model:")
+        for stats in aggregated.by_model:
+            lines.append(_format_model_usage_line(stats))
 
-    return ", ".join(parts)
+    return "\n".join(lines)
 
 
 @register_command
@@ -97,20 +137,18 @@ class StatusCommand(CommandABC):
 
     async def run(self, raw: str, agent: Agent) -> CommandResult:
         session = agent.session
-        usage, task_count = accumulate_session_usage(session)
+        aggregated = accumulate_session_usage(session)
 
         event = events.DeveloperMessageEvent(
             session_id=session.id,
             item=model.DeveloperMessageItem(
-                content=format_status_content(usage),
+                content=format_status_content(aggregated),
                 command_output=model.CommandOutput(
                     command_name=self.name,
-                    ui_extra=model.ToolResultUIExtra(
-                        type=model.ToolResultUIExtraType.SESSION_STATUS,
-                        session_status=model.SessionStatusUIExtra(
-                            usage=usage,
-                            task_count=task_count,
-                        ),
+                    ui_extra=model.SessionStatusUIExtra(
+                        usage=aggregated.total,
+                        task_count=aggregated.task_count,
+                        by_model=aggregated.by_model,
                     ),
                 ),
             ),
klaude_code/core/agent.py CHANGED
@@ -6,8 +6,8 @@ from typing import Protocol
 
 from klaude_code.core.prompt import get_system_prompt as load_system_prompt
 from klaude_code.core.reminders import Reminder, load_agent_reminders
-from klaude_code.core.task import TaskExecutionContext, TaskExecutor
-from klaude_code.core.tool import TodoContext, get_registry, load_agent_tools
+from klaude_code.core.task import SessionContext, TaskExecutionContext, TaskExecutor
+from klaude_code.core.tool import build_todo_context, get_registry, load_agent_tools
 from klaude_code.llm import LLMClientABC
 from klaude_code.protocol import events, llm_param, model, tools
 from klaude_code.protocol.model import UserInputPayload
@@ -76,11 +76,10 @@ class Agent:
         profile: AgentProfile,
     ):
         self.session: Session = session
-        self.profile: AgentProfile | None = None
-        # Active task executor, if any
+        self.profile: AgentProfile = profile
         self._current_task: TaskExecutor | None = None
-        # Ensure runtime configuration matches the active model on initialization
-        self.set_model_profile(profile)
+        if not self.session.model_name:
+            self.session.model_name = profile.llm_client.model_name
 
     def cancel(self) -> Iterable[events.Event]:
        """Handle agent cancellation and persist an interrupt marker and tool cancellations.
@@ -106,17 +105,17 @@
         )
 
     async def run_task(self, user_input: UserInputPayload) -> AsyncGenerator[events.Event, None]:
-        context = TaskExecutionContext(
+        session_ctx = SessionContext(
             session_id=self.session.id,
-            profile=self._require_profile(),
             get_conversation_history=lambda: self.session.conversation_history,
             append_history=self.session.append_history,
-            tool_registry=get_registry(),
             file_tracker=self.session.file_tracker,
-            todo_context=TodoContext(
-                get_todos=lambda: self.session.todos,
-                set_todos=lambda todos: setattr(self.session, "todos", todos),
-            ),
+            todo_context=build_todo_context(self.session),
+        )
+        context = TaskExecutionContext(
+            session_ctx=session_ctx,
+            profile=self.profile,
+            tool_registry=get_registry(),
             process_reminder=self._process_reminder,
             sub_agent_state=self.session.sub_agent_state,
         )
@@ -157,9 +156,4 @@
         self.session.model_name = profile.llm_client.model_name
 
     def get_llm_client(self) -> LLMClientABC:
-        return self._require_profile().llm_client
-
-    def _require_profile(self) -> AgentProfile:
-        if self.profile is None:
-            raise RuntimeError("Agent profile is not initialized")
-        return self.profile
+        return self.profile.llm_client
klaude_code/core/manager/sub_agent_manager.py CHANGED
@@ -58,13 +58,17 @@ class SubAgentManager:
         try:
             # Not emit the subtask's user input since task tool call is already rendered
             result: str = ""
+            task_metadata: model.TaskMetadata | None = None
             sub_agent_input = model.UserInputPayload(text=state.sub_agent_prompt, images=None)
             async for event in child_agent.run_task(sub_agent_input):
                 # Capture TaskFinishEvent content for return
                 if isinstance(event, events.TaskFinishEvent):
                     result = event.task_result
+                # Capture TaskMetadataEvent for metadata propagation
+                elif isinstance(event, events.TaskMetadataEvent):
+                    task_metadata = event.metadata.main
                 await self.emit_event(event)
-            return SubAgentResult(task_result=result, session_id=child_session.id)
+            return SubAgentResult(task_result=result, session_id=child_session.id, task_metadata=task_metadata)
         except asyncio.CancelledError:
             # Propagate cancellation so tooling can treat it as user interrupt
             log_debug(
klaude_code/core/prompt.py CHANGED
@@ -26,9 +26,34 @@ PROMPT_FILES: dict[str, str] = {
 
 
 @lru_cache(maxsize=None)
-def get_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str:
-    """Get system prompt content for the given model and sub-agent type."""
+def _load_base_prompt(file_key: str) -> str:
+    """Load and cache the base prompt content from file."""
+    try:
+        prompt_path = PROMPT_FILES[file_key]
+    except KeyError as exc:
+        raise ValueError(f"Unknown prompt key: {file_key}") from exc
+
+    return files(__package__).joinpath(prompt_path).read_text(encoding="utf-8").strip()
 
+
+def _get_file_key(model_name: str, sub_agent_type: str | None) -> str:
+    """Determine which prompt file to use based on model and agent type."""
+    if sub_agent_type is not None:
+        return sub_agent_type
+
+    match model_name:
+        case "gpt-5.1-codex-max":
+            return "main_gpt_5_1_codex_max"
+        case name if "gpt-5" in name:
+            return "main_gpt_5_1"
+        case name if "gemini" in name:
+            return "main_gemini"
+        case _:
+            return "main_claude"
+
+
+def _build_env_info(model_name: str) -> str:
+    """Build environment info section with dynamic runtime values."""
     cwd = Path.cwd()
     today = datetime.datetime.now().strftime("%Y-%m-%d")
     is_git_repo = (cwd / ".git").exists()
@@ -38,30 +63,6 @@ def get_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str
         if shutil.which(command) is not None:
             available_tools.append(f"{command}: {desc}")
 
-    if sub_agent_type is None:
-        match model_name:
-            case "gpt-5.1-codex-max":
-                file_key = "main_gpt_5_1_codex_max"
-            case name if "gpt-5" in name:
-                file_key = "main_gpt_5_1"
-            case name if "gemini" in name:
-                file_key = "main_gemini"
-            case _:
-                file_key = "main_claude"
-    else:
-        file_key = sub_agent_type
-
-    try:
-        prompt_path = PROMPT_FILES[file_key]
-    except KeyError as exc:
-        raise ValueError(f"Unknown prompt key: {file_key}") from exc
-
-    base_prompt = files(__package__).joinpath(prompt_path).read_text(encoding="utf-8").strip()
-
-    if model_name == "gpt-5.1-codex-max":
-        # Do not add env info for gpt-5.1-codex-max
-        return base_prompt
-
     env_lines: list[str] = [
         "",
         "",
@@ -80,6 +81,15 @@ def get_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str
 
     env_lines.append("</env>")
 
-    env_info = "\n".join(env_lines)
+    return "\n".join(env_lines)
+
+
+def get_system_prompt(model_name: str, sub_agent_type: str | None = None) -> str:
+    """Get system prompt content for the given model and sub-agent type."""
+    file_key = _get_file_key(model_name, sub_agent_type)
+    base_prompt = _load_base_prompt(file_key)
+
+    if model_name == "gpt-5.1-codex-max":
+        return base_prompt
 
-    return base_prompt + env_info
+    return base_prompt + _build_env_info(model_name)
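
The refactor splits prompt selection (_get_file_key), file loading (_load_base_prompt, which now carries the lru_cache), and environment info (_build_env_info) into separate helpers that get_system_prompt composes. A small usage sketch, assuming klaude-code 1.2.9 is installed; the model names below are illustrative only:

from klaude_code.core.prompt import get_system_prompt

# "gpt-5.1-codex-max" returns its base prompt verbatim; no <env> block is appended.
codex_prompt = get_system_prompt("gpt-5.1-codex-max")

# Other model names (illustrative here) get the matching base prompt plus the
# environment info built by _build_env_info (cwd, date, git status, available CLI tools).
claude_prompt = get_system_prompt("claude-sonnet-4")
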
klaude_code/core/reminders.py CHANGED
@@ -150,16 +150,16 @@ async def todo_not_used_recently_reminder(
         return None
 
     # Count non-todo tool calls since the last TodoWrite
-    other_tool_call_count_befor_last_todo = 0
+    other_tool_call_count_before_last_todo = 0
     for item in reversed(session.conversation_history):
         if isinstance(item, model.ToolCallItem):
             if item.name in (tools.TODO_WRITE, tools.UPDATE_PLAN):
                 break
-            other_tool_call_count_befor_last_todo += 1
-        if other_tool_call_count_befor_last_todo >= const.TODO_REMINDER_TOOL_CALL_THRESHOLD:
+            other_tool_call_count_before_last_todo += 1
+        if other_tool_call_count_before_last_todo >= const.TODO_REMINDER_TOOL_CALL_THRESHOLD:
             break
 
-    not_used_recently = other_tool_call_count_befor_last_todo >= const.TODO_REMINDER_TOOL_CALL_THRESHOLD
+    not_used_recently = other_tool_call_count_before_last_todo >= const.TODO_REMINDER_TOOL_CALL_THRESHOLD
 
     if not not_used_recently:
         return None