klaude-code 1.2.9-py3-none-any.whl → 1.2.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. klaude_code/cli/main.py +12 -1
  2. klaude_code/cli/runtime.py +7 -11
  3. klaude_code/command/__init__.py +68 -23
  4. klaude_code/command/clear_cmd.py +6 -2
  5. klaude_code/command/command_abc.py +5 -2
  6. klaude_code/command/diff_cmd.py +5 -2
  7. klaude_code/command/export_cmd.py +7 -4
  8. klaude_code/command/help_cmd.py +6 -2
  9. klaude_code/command/model_cmd.py +5 -2
  10. klaude_code/command/prompt_command.py +8 -3
  11. klaude_code/command/refresh_cmd.py +6 -2
  12. klaude_code/command/registry.py +17 -5
  13. klaude_code/command/release_notes_cmd.py +5 -2
  14. klaude_code/command/status_cmd.py +8 -4
  15. klaude_code/command/terminal_setup_cmd.py +7 -4
  16. klaude_code/const/__init__.py +1 -1
  17. klaude_code/core/agent.py +55 -9
  18. klaude_code/core/executor.py +2 -2
  19. klaude_code/core/manager/agent_manager.py +6 -7
  20. klaude_code/core/manager/llm_clients.py +47 -22
  21. klaude_code/core/manager/llm_clients_builder.py +19 -7
  22. klaude_code/core/manager/sub_agent_manager.py +1 -1
  23. klaude_code/core/reminders.py +0 -3
  24. klaude_code/core/task.py +2 -2
  25. klaude_code/core/tool/file/_utils.py +30 -0
  26. klaude_code/core/tool/file/edit_tool.py +5 -30
  27. klaude_code/core/tool/file/multi_edit_tool.py +6 -31
  28. klaude_code/core/tool/file/read_tool.py +6 -18
  29. klaude_code/core/tool/file/write_tool.py +5 -30
  30. klaude_code/core/tool/memory/__init__.py +5 -0
  31. klaude_code/core/tool/memory/skill_loader.py +2 -1
  32. klaude_code/core/tool/memory/skill_tool.py +13 -0
  33. klaude_code/llm/__init__.py +2 -12
  34. klaude_code/llm/anthropic/client.py +2 -1
  35. klaude_code/llm/client.py +1 -1
  36. klaude_code/llm/codex/client.py +1 -1
  37. klaude_code/llm/openai_compatible/client.py +3 -2
  38. klaude_code/llm/openrouter/client.py +3 -3
  39. klaude_code/llm/registry.py +33 -7
  40. klaude_code/llm/responses/client.py +2 -1
  41. klaude_code/llm/responses/input.py +1 -1
  42. klaude_code/llm/usage.py +17 -8
  43. klaude_code/protocol/model.py +12 -7
  44. klaude_code/protocol/op.py +1 -0
  45. klaude_code/session/export.py +5 -5
  46. klaude_code/session/session.py +15 -5
  47. klaude_code/ui/core/input.py +1 -1
  48. klaude_code/ui/modes/repl/clipboard.py +5 -5
  49. klaude_code/ui/renderers/metadata.py +1 -1
  50. klaude_code/ui/terminal/control.py +2 -2
  51. klaude_code/version.py +3 -3
  52. {klaude_code-1.2.9.dist-info → klaude_code-1.2.10.dist-info}/METADATA +1 -1
  53. {klaude_code-1.2.9.dist-info → klaude_code-1.2.10.dist-info}/RECORD +55 -54
  54. {klaude_code-1.2.9.dist-info → klaude_code-1.2.10.dist-info}/WHEEL +0 -0
  55. {klaude_code-1.2.9.dist-info → klaude_code-1.2.10.dist-info}/entry_points.txt +0 -0
klaude_code/core/tool/memory/skill_loader.py CHANGED
@@ -115,7 +115,8 @@ class SkillLoader:
 
             return skill
 
-        except Exception:
+        except (OSError, yaml.YAMLError) as e:
+            log_debug(f"Failed to load skill from {skill_path}: {e}")
             return None
 
     def discover_skills(self) -> list[Skill]:
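
Note: this hunk is one instance of a sweep in 1.2.10 that replaces bare `except Exception` with narrowed exception tuples, here with a debug log added; the same pattern recurs in export.py, session.py, clipboard.py, and version.py below. A minimal sketch of the pattern, using a hypothetical `parse_front_matter` helper that is not part of this package:

import yaml  # PyYAML, which the skill loader above already uses


def parse_front_matter(text: str) -> dict | None:
    """Parse YAML front matter, swallowing only the expected failure modes."""
    try:
        data = yaml.safe_load(text)
        return data if isinstance(data, dict) else None
    except (OSError, yaml.YAMLError):
        # Anything unexpected (e.g. a bug in our own code) still propagates.
        return None
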
klaude_code/core/tool/memory/skill_tool.py CHANGED
@@ -13,15 +13,26 @@ class SkillTool(ToolABC):
     """Tool to execute/load a skill within the main conversation"""
 
     _skill_loader: SkillLoader | None = None
+    _discovery_done: bool = False
 
     @classmethod
     def set_skill_loader(cls, loader: SkillLoader) -> None:
        """Set the skill loader instance"""
        cls._skill_loader = loader
+        cls._discovery_done = False
+
+    @classmethod
+    def _ensure_skills_discovered(cls) -> None:
+        if cls._discovery_done:
+            return
+        if cls._skill_loader is not None:
+            cls._skill_loader.discover_skills()
+            cls._discovery_done = True
 
     @classmethod
     def schema(cls) -> llm_param.ToolSchema:
         """Generate schema with embedded available skills metadata"""
+        cls._ensure_skills_discovered()
         skills_xml = cls._generate_skills_xml()
 
         return llm_param.ToolSchema(
@@ -69,6 +80,8 @@ class SkillTool(ToolABC):
                 output=f"Invalid arguments: {e}",
             )
 
+        cls._ensure_skills_discovered()
+
         if not cls._skill_loader:
             return model.ToolResultItem(
                 status="error",
klaude_code/llm/__init__.py CHANGED
@@ -1,23 +1,13 @@
 """LLM package init.
 
-Imports built-in LLM clients so their ``@register`` decorators run and they
-become available via the registry.
+LLM clients are lazily loaded to avoid heavy imports at module load time.
+Only LLMClientABC and create_llm_client are exposed.
 """
 
-from .anthropic import AnthropicClient
 from .client import LLMClientABC
-from .codex import CodexClient
-from .openai_compatible import OpenAICompatibleClient
-from .openrouter import OpenRouterClient
 from .registry import create_llm_client
-from .responses import ResponsesClient
 
 __all__ = [
     "LLMClientABC",
-    "ResponsesClient",
-    "OpenAICompatibleClient",
-    "OpenRouterClient",
-    "AnthropicClient",
-    "CodexClient",
     "create_llm_client",
 ]
klaude_code/llm/anthropic/client.py CHANGED
@@ -46,7 +46,7 @@ class AnthropicClient(LLMClientABC):
     async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem, None]:
         param = apply_config_defaults(param, self.get_llm_config())
 
-        metadata_tracker = MetadataTracker(cost_config=self._config.cost)
+        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
         messages = convert_history_to_input(param.input, param.model)
         tools = convert_tool_schema(param.tools)
@@ -179,6 +179,7 @@ class AnthropicClient(LLMClientABC):
             output_tokens=output_tokens,
             cached_tokens=cached_tokens,
             context_limit=param.context_limit,
+            max_tokens=param.max_tokens,
         )
         metadata_tracker.set_usage(usage)
         metadata_tracker.set_model_name(str(param.model))
klaude_code/llm/client.py CHANGED
@@ -19,7 +19,7 @@ class LLMClientABC(ABC):
     @abstractmethod
     async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem, None]:
         raise NotImplementedError
-        yield cast(model.ConversationItem, None)  # pyright: ignore[reportUnreachable]
+        yield cast(model.ConversationItem, None)
 
     def get_llm_config(self) -> llm_param.LLMConfigParameter:
         return self._config
klaude_code/llm/codex/client.py CHANGED
@@ -84,7 +84,7 @@ class CodexClient(LLMClientABC):
         # Codex API requires store=False
         param.store = False
 
-        metadata_tracker = MetadataTracker(cost_config=self._config.cost)
+        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
         inputs = convert_history_to_input(param.input, param.model)
         tools = convert_tool_schema(param.tools)
klaude_code/llm/openai_compatible/client.py CHANGED
@@ -47,7 +47,7 @@ class OpenAICompatibleClient(LLMClientABC):
         messages = convert_history_to_input(param.input, param.system, param.model)
         tools = convert_tool_schema(param.tools)
 
-        metadata_tracker = MetadataTracker(cost_config=self._config.cost)
+        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
         extra_body = {}
         extra_headers = {"extra": json.dumps({"session_id": param.session_id}, sort_keys=True)}
@@ -88,7 +88,7 @@ class OpenAICompatibleClient(LLMClientABC):
             if (
                 event.usage is not None and event.usage.completion_tokens is not None  # pyright: ignore[reportUnnecessaryComparison] gcp gemini will return None usage field
             ):
-                metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit))
+                metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit, param.max_tokens))
             if event.model:
                 metadata_tracker.set_model_name(event.model)
             if provider := getattr(event, "provider", None):
@@ -104,6 +104,7 @@ class OpenAICompatibleClient(LLMClientABC):
                     convert_usage(
                         openai.types.CompletionUsage.model_validate(getattr(event.choices[0], "usage")),
                         param.context_limit,
+                        param.max_tokens,
                     )
                 )
 
klaude_code/llm/openrouter/client.py CHANGED
@@ -38,7 +38,7 @@ class OpenRouterClient(LLMClientABC):
         messages = convert_history_to_input(param.input, param.system, param.model)
         tools = convert_tool_schema(param.tools)
 
-        metadata_tracker = MetadataTracker(cost_config=self._config.cost)
+        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
         extra_body: dict[str, object] = {
             "usage": {"include": True}  # To get the cache tokens at the end of the response
@@ -73,7 +73,7 @@
             max_tokens=param.max_tokens,
             tools=tools,
             verbosity=param.verbosity,
-            extra_body=extra_body,  # pyright: ignore[reportUnknownArgumentType]
+            extra_body=extra_body,
             extra_headers=extra_headers,  # pyright: ignore[reportUnknownArgumentType]
         )
 
@@ -100,7 +100,7 @@
             if (
                 event.usage is not None and event.usage.completion_tokens is not None  # pyright: ignore[reportUnnecessaryComparison]
             ):  # gcp gemini will return None usage field
-                metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit))
+                metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit, param.max_tokens))
             if event.model:
                 metadata_tracker.set_model_name(event.model)
             if provider := getattr(event, "provider", None):
klaude_code/llm/registry.py CHANGED
@@ -1,22 +1,48 @@
-from typing import Callable, TypeVar
+from typing import TYPE_CHECKING, Callable, TypeVar
 
-from klaude_code.llm.client import LLMClientABC
 from klaude_code.protocol import llm_param
 
-_REGISTRY: dict[llm_param.LLMClientProtocol, type[LLMClientABC]] = {}
+if TYPE_CHECKING:
+    from klaude_code.llm.client import LLMClientABC
 
-T = TypeVar("T", bound=LLMClientABC)
+_T = TypeVar("_T", bound=type["LLMClientABC"])
 
+# Track which protocols have been loaded
+_loaded_protocols: set[llm_param.LLMClientProtocol] = set()
+_REGISTRY: dict[llm_param.LLMClientProtocol, type["LLMClientABC"]] = {}
 
-def register(name: llm_param.LLMClientProtocol) -> Callable[[type[T]], type[T]]:
-    def _decorator(cls: type[T]) -> type[T]:
+
+def _load_protocol(protocol: llm_param.LLMClientProtocol) -> None:
+    """Load the module for a specific protocol on demand."""
+    if protocol in _loaded_protocols:
+        return
+    _loaded_protocols.add(protocol)
+
+    # Import only the needed module to trigger @register decorator
+    if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
+        from . import anthropic as _  # noqa: F401
+    elif protocol == llm_param.LLMClientProtocol.CODEX:
+        from . import codex as _  # noqa: F401
+    elif protocol == llm_param.LLMClientProtocol.OPENAI:
+        from . import openai_compatible as _  # noqa: F401
+    elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
+        from . import openrouter as _  # noqa: F401
+    elif protocol == llm_param.LLMClientProtocol.RESPONSES:
+        from . import responses as _  # noqa: F401
+
+
+def register(name: llm_param.LLMClientProtocol) -> Callable[[_T], _T]:
+    """Decorator to register an LLM client class for a protocol."""
+
+    def _decorator(cls: _T) -> _T:
         _REGISTRY[name] = cls
         return cls
 
     return _decorator
 
 
-def create_llm_client(config: llm_param.LLMConfigParameter) -> LLMClientABC:
+def create_llm_client(config: llm_param.LLMConfigParameter) -> "LLMClientABC":
+    _load_protocol(config.protocol)
     if config.protocol not in _REGISTRY:
         raise ValueError(f"Unknown LLMClient protocol: {config.protocol}")
     return _REGISTRY[config.protocol].create(config)
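
Note: `create_llm_client` now imports the provider module for the requested protocol on first use, and that import runs the module's `@register` decorator, which populates `_REGISTRY`. A sketch of how one might observe the effect (illustrative only; any `LLMConfigParameter` fields besides `protocol` are assumptions here and the real model may require more):

import sys

from klaude_code.llm import create_llm_client
from klaude_code.protocol import llm_param

# Before any client is created, the provider module should be absent.
assert "klaude_code.llm.anthropic" not in sys.modules

config = llm_param.LLMConfigParameter(protocol=llm_param.LLMClientProtocol.ANTHROPIC)
client = create_llm_client(config)  # triggers the import, which runs @register

assert "klaude_code.llm.anthropic" in sys.modules
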
klaude_code/llm/responses/client.py CHANGED
@@ -102,6 +102,7 @@ async def parse_responses_stream(
             reasoning_tokens=event.response.usage.output_tokens_details.reasoning_tokens,
             total_tokens=event.response.usage.total_tokens,
             context_limit=param.context_limit,
+            max_tokens=param.max_tokens,
         )
         metadata_tracker.set_usage(usage)
         metadata_tracker.set_model_name(str(param.model))
@@ -159,7 +160,7 @@ class ResponsesClient(LLMClientABC):
     async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem, None]:
         param = apply_config_defaults(param, self.get_llm_config())
 
-        metadata_tracker = MetadataTracker(cost_config=self._config.cost)
+        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
 
         inputs = convert_history_to_input(param.input, param.model)
         tools = convert_tool_schema(param.tools)
klaude_code/llm/responses/input.py CHANGED
@@ -34,7 +34,7 @@ def _build_tool_result_item(tool: model.ToolResultItem) -> responses.ResponseInp
         "call_id": tool.call_id,
         "output": content_parts,
     }
-    return item  # type: ignore[return-value]
+    return item
 
 
 def convert_history_to_input(
klaude_code/llm/usage.py CHANGED
@@ -92,10 +92,14 @@ class MetadataTracker:
         return self._metadata_item
 
 
-def convert_usage(usage: openai.types.CompletionUsage, context_limit: int | None = None) -> model.Usage:
+def convert_usage(
+    usage: openai.types.CompletionUsage,
+    context_limit: int | None = None,
+    max_tokens: int | None = None,
+) -> model.Usage:
     """Convert OpenAI CompletionUsage to internal Usage model.
 
-    context_window_size is set to total_tokens from the API response,
+    context_token is set to total_tokens from the API response,
     representing the actual context window usage for this turn.
     """
     return model.Usage(
@@ -104,8 +108,9 @@ def convert_usage(usage: openai.types.CompletionUsage, context_limit: int | None
         reasoning_tokens=(usage.completion_tokens_details.reasoning_tokens if usage.completion_tokens_details else 0)
         or 0,
         output_tokens=usage.completion_tokens,
-        context_window_size=usage.total_tokens,
+        context_token=usage.total_tokens,
         context_limit=context_limit,
+        max_tokens=max_tokens,
    )
 
 
@@ -114,19 +119,21 @@ def convert_anthropic_usage(
     output_tokens: int,
     cached_tokens: int,
     context_limit: int | None = None,
+    max_tokens: int | None = None,
 ) -> model.Usage:
     """Convert Anthropic usage data to internal Usage model.
 
-    context_window_size is computed from input + cached + output tokens,
+    context_token is computed from input + cached + output tokens,
     representing the actual context window usage for this turn.
     """
-    context_window_size = input_tokens + cached_tokens + output_tokens
+    context_token = input_tokens + cached_tokens + output_tokens
     return model.Usage(
         input_tokens=input_tokens,
         output_tokens=output_tokens,
         cached_tokens=cached_tokens,
-        context_window_size=context_window_size,
+        context_token=context_token,
         context_limit=context_limit,
+        max_tokens=max_tokens,
     )
 
 
@@ -137,10 +144,11 @@ def convert_responses_usage(
     reasoning_tokens: int,
     total_tokens: int,
     context_limit: int | None = None,
+    max_tokens: int | None = None,
 ) -> model.Usage:
     """Convert OpenAI Responses API usage data to internal Usage model.
 
-    context_window_size is set to total_tokens from the API response,
+    context_token is set to total_tokens from the API response,
     representing the actual context window usage for this turn.
     """
     return model.Usage(
@@ -148,6 +156,7 @@ def convert_responses_usage(
         output_tokens=output_tokens,
         cached_tokens=cached_tokens,
         reasoning_tokens=reasoning_tokens,
-        context_window_size=total_tokens,
+        context_token=total_tokens,
         context_limit=context_limit,
+        max_tokens=max_tokens,
     )
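
Note: all three converters now thread `max_tokens` through to `model.Usage` so the context gauge can subtract the reserved output budget. A quick sanity check of the Anthropic path, with invented token counts:

from klaude_code.llm.usage import convert_anthropic_usage

usage = convert_anthropic_usage(
    input_tokens=50_000,
    output_tokens=4_000,
    cached_tokens=30_000,
    context_limit=200_000,
    max_tokens=32_000,
)
# context_token = input + cached + output = 50_000 + 30_000 + 4_000
assert usage.context_token == 84_000
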
klaude_code/protocol/model.py CHANGED
@@ -4,6 +4,7 @@ from typing import Annotated, Literal
 
 from pydantic import BaseModel, ConfigDict, Field, computed_field
 
+from klaude_code import const
 from klaude_code.protocol.commands import CommandName
 from klaude_code.protocol.tools import SubAgentType
 
@@ -19,8 +20,9 @@ class Usage(BaseModel):
     output_tokens: int = 0
 
     # Context window tracking
-    context_window_size: int | None = None  # Peak total_tokens seen (for context usage display)
+    context_token: int | None = None  # Peak total_tokens seen (for context usage display)
     context_limit: int | None = None  # Model's context limit
+    max_tokens: int | None = None  # Max output tokens for this request
 
     throughput_tps: float | None = None
     first_token_latency_ms: float | None = None
@@ -31,13 +33,13 @@ class Usage(BaseModel):
     cache_read_cost: float | None = None  # Cost for cached tokens
     currency: str = "USD"  # Currency for cost display (USD or CNY)
 
-    @computed_field  # type: ignore[prop-decorator]
+    @computed_field
     @property
     def total_tokens(self) -> int:
         """Total tokens computed from input + output tokens."""
         return self.input_tokens + self.output_tokens
 
-    @computed_field  # type: ignore[prop-decorator]
+    @computed_field
     @property
     def total_cost(self) -> float | None:
         """Total cost computed from input + output + cache_read costs."""
@@ -45,15 +47,18 @@ class Usage(BaseModel):
         non_none = [c for c in costs if c is not None]
         return sum(non_none) if non_none else None
 
-    @computed_field  # type: ignore[prop-decorator]
+    @computed_field
     @property
     def context_usage_percent(self) -> float | None:
-        """Context usage percentage computed from context_window_size / context_limit."""
+        """Context usage percentage computed from context_token / (context_limit - max_tokens)."""
         if self.context_limit is None or self.context_limit <= 0:
             return None
-        if self.context_window_size is None:
+        if self.context_token is None:
             return None
-        return (self.context_window_size / self.context_limit) * 100
+        effective_limit = self.context_limit - (self.max_tokens or const.DEFAULT_MAX_TOKENS)
+        if effective_limit <= 0:
+            return None
+        return (self.context_token / effective_limit) * 100
 
 
 class TodoItem(BaseModel):
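
Note: the denominator change matters in practice. Continuing the invented numbers from the usage.py example above, the old formula `context_token / context_limit` reports 42% while the new one reports 50%, because 32_000 tokens of the window are reserved for output:

context_limit = 200_000
max_tokens = 32_000
context_token = 84_000

effective_limit = context_limit - max_tokens          # 168_000
old_percent = context_token / context_limit * 100     # 42.0
new_percent = context_token / effective_limit * 100   # 50.0

When `max_tokens` is unset, the new code falls back to `const.DEFAULT_MAX_TOKENS`, so the gauge always measures against the window actually available for input plus history.
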
klaude_code/protocol/op.py CHANGED
@@ -67,6 +67,7 @@ class InitAgentOperation(Operation):
 
     type: OperationType = OperationType.INIT_AGENT
     session_id: str | None = None
+    is_new_session: bool = False
 
     async def execute(self, handler: OperationHandler) -> None:
         await handler.handle_init_agent(self)
klaude_code/session/export.py CHANGED
@@ -294,7 +294,7 @@ def _try_render_todo_args(arguments: str) -> str | None:
             return None
 
         return f'<div class="todo-list">{"".join(items_html)}</div>'
-    except Exception:
+    except (json.JSONDecodeError, KeyError, TypeError):
         return None
 
 
@@ -380,7 +380,7 @@ def _get_mermaid_link_html(
        try:
            args = json.loads(tool_call.arguments)
            code = args.get("code", "")
-        except Exception:
+        except (json.JSONDecodeError, TypeError):
            code = ""
    else:
        code = ""
@@ -447,7 +447,7 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
     try:
         parsed = json.loads(tool_call.arguments)
         args_text = json.dumps(parsed, ensure_ascii=False, indent=2)
-    except Exception:
+    except (json.JSONDecodeError, TypeError):
         args_text = tool_call.arguments
 
     args_html = _escape_html(args_text or "")
@@ -469,7 +469,7 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
             parsed_args = json.loads(tool_call.arguments)
             if parsed_args.get("command") in {"create", "str_replace", "insert"}:
                 force_collapse = True
-        except Exception:
+        except (json.JSONDecodeError, TypeError):
             pass
 
     should_collapse = force_collapse or _should_collapse(args_html)
@@ -506,7 +506,7 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
                 new_string = args_data.get("new_string", "")
                 if old_string == "" and new_string:
                     diff_text = "\n".join(f"+{line}" for line in new_string.splitlines())
-        except Exception:
+        except (json.JSONDecodeError, TypeError):
            pass
 
     items_to_render: list[str] = []
klaude_code/session/session.py CHANGED
@@ -103,7 +103,17 @@ class Session(BaseModel):
         return self._messages_dir() / f"{prefix}-{self.id}.jsonl"
 
     @classmethod
-    def load(cls, id: str) -> "Session":
+    def create(cls, id: str | None = None) -> "Session":
+        """Create a new session without checking for existing files."""
+        return Session(id=id or uuid.uuid4().hex, work_dir=Path.cwd())
+
+    @classmethod
+    def load(cls, id: str, *, skip_if_missing: bool = False) -> "Session":
+        """Load an existing session or create a new one if not found."""
+
+        if skip_if_missing:
+            return Session(id=id, work_dir=Path.cwd())
+
         # Load session metadata
         sessions_dir = cls._sessions_dir()
         session_candidates = sorted(
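
Note: `create()` and the `skip_if_missing` flag split "start a fresh session" from "load one from disk", which pairs with the new `is_new_session` field on `InitAgentOperation` above. With `skip_if_missing=True`, `load` returns an in-memory session for that id without touching disk at all. The intended call sites, sketched (illustrative, not taken from the package):

from klaude_code.session.session import Session

fresh = Session.create()  # new id, no disk lookup

# Resume by id, but start empty when the session
# has never been persisted.
resumed = Session.load("abc123", skip_if_missing=True)
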
@@ -167,7 +177,7 @@ class Session(BaseModel):
                     item = cls_type(**data)
                     # pyright: ignore[reportAssignmentType]
                     history.append(item)  # type: ignore[arg-type]
-                except Exception:
+                except (json.JSONDecodeError, KeyError, TypeError):
                     # Best-effort load; skip malformed lines
                     continue
         sess.conversation_history = history
@@ -242,7 +252,7 @@ class Session(BaseModel):
                 if ts > latest_ts:
                     latest_ts = ts
                     latest_id = sid
-            except Exception:
+            except (json.JSONDecodeError, KeyError, TypeError, OSError):
                continue
        return latest_id
 
@@ -395,7 +405,7 @@ class Session(BaseModel):
                        text_parts.append(text)
                    return " ".join(text_parts) if text_parts else None
                return None
-        except Exception:
+        except (json.JSONDecodeError, KeyError, TypeError, OSError):
            return None
        return None
 
@@ -403,7 +413,7 @@ class Session(BaseModel):
        for p in sessions_dir.glob("*.json"):
            try:
                data = json.loads(p.read_text())
-            except Exception:
+            except (json.JSONDecodeError, OSError):
                # Skip unreadable files
                continue
            # Filter out sub-agent sessions
klaude_code/ui/core/input.py CHANGED
@@ -68,4 +68,4 @@ class InputProviderABC(ABC):
         UserInputPayload with text and optional images.
         """
         raise NotImplementedError
-        yield UserInputPayload(text="")  # pyright: ignore[reportUnreachable]
+        yield UserInputPayload(text="")
klaude_code/ui/modes/repl/clipboard.py CHANGED
@@ -40,19 +40,19 @@ class ClipboardCaptureState:
         """Capture image from clipboard, save to disk, and return a tag like [Image #N]."""
         try:
             clipboard_data = ImageGrab.grabclipboard()
-        except Exception:
+        except OSError:
             return None
         if not isinstance(clipboard_data, Image.Image):
             return None
         try:
             self._images_dir.mkdir(parents=True, exist_ok=True)
-        except Exception:
+        except OSError:
             return None
         filename = f"clipboard_{uuid.uuid4().hex[:8]}.png"
         path = self._images_dir / filename
         try:
             clipboard_data.save(path, "PNG")
-        except Exception:
+        except OSError:
             return None
         tag = f"[Image #{self._counter}]"
         self._counter += 1
@@ -123,7 +123,7 @@ def _encode_image_file(file_path: str) -> ImageURLPart | None:
         # Clipboard images are always saved as PNG
         data_url = f"data:image/png;base64,{encoded}"
         return ImageURLPart(image_url=ImageURLPart.ImageURL(url=data_url, id=None))
-    except Exception:
+    except OSError:
         return None
 
 
@@ -148,5 +148,5 @@ def copy_to_clipboard(text: str) -> None:
             input=text.encode("utf-8"),
             check=True,
         )
-    except Exception:
+    except (OSError, subprocess.SubprocessError):
         pass
klaude_code/ui/renderers/metadata.py CHANGED
@@ -118,7 +118,7 @@ def _render_task_metadata_block(
     if metadata.usage is not None:
         # Context (only for main agent)
         if show_context_and_time and metadata.usage.context_usage_percent is not None:
-            context_size = format_number(metadata.usage.context_window_size or 0)
+            context_size = format_number(metadata.usage.context_token or 0)
             parts3.append(
                 Text.assemble(
                     ("context", ThemeKey.METADATA_DIM),
klaude_code/ui/terminal/control.py CHANGED
@@ -38,7 +38,7 @@ def start_esc_interrupt_monitor(
     # Fallback for non-interactive or non-POSIX environments.
     if not sys.stdin.isatty() or os.name != "posix":
 
-        async def _noop() -> None:  # type: ignore[return-type]
+        async def _noop() -> None:
             return None
 
         return stop_event, asyncio.create_task(_noop())
@@ -85,7 +85,7 @@ def start_esc_interrupt_monitor(
             log((f"esc monitor error: {exc}", "r red"))
         finally:
             try:
-                termios.tcsetattr(fd, termios.TCSADRAIN, old)  # type: ignore[name-defined]
+                termios.tcsetattr(fd, termios.TCSADRAIN, old)
             except Exception:
                 pass
 
klaude_code/version.py CHANGED
@@ -57,7 +57,7 @@ def _get_installed_version() -> str | None:
                 ver = ver[1:]
             return ver
         return None
-    except Exception:
+    except (OSError, subprocess.SubprocessError):
         return None
 
 
@@ -67,7 +67,7 @@ def _get_latest_version() -> str | None:
         with urllib.request.urlopen(PYPI_URL, timeout=5) as response:
             data = json.loads(response.read().decode())
             return data.get("info", {}).get("version")
-    except Exception:
+    except (OSError, json.JSONDecodeError, ValueError):
         return None
 
 
@@ -93,7 +93,7 @@ def _compare_versions(installed: str, latest: str) -> bool:
         installed_tuple = _parse_version(installed)
         latest_tuple = _parse_version(latest)
         return latest_tuple > installed_tuple
-    except Exception:
+    except ValueError:
         return False
 
 
{klaude_code-1.2.9.dist-info → klaude_code-1.2.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: klaude-code
-Version: 1.2.9
+Version: 1.2.10
 Summary: Add your description here
 Requires-Dist: anthropic>=0.66.0
 Requires-Dist: openai>=1.102.0