klaude-code 1.4.2__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,255 @@
+ """Thinking level configuration data and helpers.
+
+ This module contains thinking level definitions and helper functions
+ that are shared between command layer and UI layer.
+ """
+
+ from dataclasses import dataclass
+ from typing import Literal
+
+ from klaude_code.protocol import llm_param
+
+ ReasoningEffort = Literal["high", "medium", "low", "minimal", "none", "xhigh"]
+
+ # Thinking level options for different protocols
+ RESPONSES_LEVELS = ["low", "medium", "high"]
+ RESPONSES_GPT51_LEVELS = ["none", "low", "medium", "high"]
+ RESPONSES_GPT52_LEVELS = ["none", "low", "medium", "high", "xhigh"]
+ RESPONSES_CODEX_MAX_LEVELS = ["medium", "high", "xhigh"]
+ RESPONSES_GEMINI_FLASH_LEVELS = ["minimal", "low", "medium", "high"]
+
+ ANTHROPIC_LEVELS: list[tuple[str, int | None]] = [
+     ("off", 0),
+     ("low (2048 tokens)", 2048),
+     ("medium (8192 tokens)", 8192),
+     ("high (31999 tokens)", 31999),
+ ]
+
+
+ def is_openrouter_model_with_reasoning_effort(model_name: str | None) -> bool:
+     """Check if the model is GPT series, Grok or Gemini 3."""
+     if not model_name:
+         return False
+     model_lower = model_name.lower()
+     return model_lower.startswith(("openai/gpt-", "x-ai/grok-", "google/gemini-3"))
+
+
+ def _is_gpt51_model(model_name: str | None) -> bool:
+     """Check if the model is GPT-5.1."""
+     if not model_name:
+         return False
+     return model_name.lower() in ["gpt-5.1", "openai/gpt-5.1", "gpt-5.1-codex-2025-11-13"]
+
+
+ def _is_gpt52_model(model_name: str | None) -> bool:
+     """Check if the model is GPT-5.2."""
+     if not model_name:
+         return False
+     return model_name.lower() in ["gpt-5.2", "openai/gpt-5.2"]
+
+
+ def _is_codex_max_model(model_name: str | None) -> bool:
+     """Check if the model is GPT-5.1-codex-max."""
+     if not model_name:
+         return False
+     return "codex-max" in model_name.lower()
+
+
+ def _is_gemini_flash_model(model_name: str | None) -> bool:
+     """Check if the model is Gemini 3 Flash."""
+     if not model_name:
+         return False
+     return "gemini-3-flash" in model_name.lower()
+
+
+ def should_auto_trigger_thinking(model_name: str | None) -> bool:
+     """Check if model should auto-trigger thinking selection on switch."""
+     if not model_name:
+         return False
+     model_lower = model_name.lower()
+     return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
+
+
+ def get_levels_for_responses(model_name: str | None) -> list[str]:
+     """Get thinking levels for responses protocol."""
+     if _is_codex_max_model(model_name):
+         return RESPONSES_CODEX_MAX_LEVELS
+     if _is_gpt52_model(model_name):
+         return RESPONSES_GPT52_LEVELS
+     if _is_gpt51_model(model_name):
+         return RESPONSES_GPT51_LEVELS
+     if _is_gemini_flash_model(model_name):
+         return RESPONSES_GEMINI_FLASH_LEVELS
+     return RESPONSES_LEVELS
+
+
+ def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
+     """Format the current thinking configuration for display."""
+     thinking = config.thinking
+     if not thinking:
+         return "not configured"
+
+     protocol = config.protocol
+
+     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
+         if thinking.reasoning_effort:
+             return f"reasoning_effort={thinking.reasoning_effort}"
+         return "not set"
+
+     if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
+         if thinking.type == "disabled":
+             return "off"
+         if thinking.type == "enabled":
+             return f"enabled (budget_tokens={thinking.budget_tokens})"
+         return "not set"
+
+     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
+         if is_openrouter_model_with_reasoning_effort(config.model):
+             if thinking.reasoning_effort:
+                 return f"reasoning_effort={thinking.reasoning_effort}"
+         else:
+             if thinking.type == "disabled":
+                 return "off"
+             if thinking.type == "enabled":
+                 return f"enabled (budget_tokens={thinking.budget_tokens})"
+         return "not set"
+
+     if protocol == llm_param.LLMClientProtocol.OPENAI:
+         if thinking.type == "disabled":
+             return "off"
+         if thinking.type == "enabled":
+             return f"enabled (budget_tokens={thinking.budget_tokens})"
+         return "not set"
+
+     return "unknown protocol"
+
+
+ # ---------------------------------------------------------------------------
+ # Thinking picker data structures
+ # ---------------------------------------------------------------------------
+
+
+ @dataclass
+ class ThinkingOption:
+     """A thinking option for selection.
+
+     Attributes:
+         label: Display label for this option (e.g., "low", "medium (8192 tokens)").
+         value: Encoded value string (e.g., "effort:low", "budget:2048").
+     """
+
+     label: str
+     value: str
+
+
+ @dataclass
+ class ThinkingPickerData:
+     """Data for building thinking picker UI.
+
+     Attributes:
+         options: List of thinking options.
+         message: Prompt message (e.g., "Select reasoning effort:").
+         current_value: Currently selected value, or None.
+     """
+
+     options: list[ThinkingOption]
+     message: str
+     current_value: str | None
+
+
+ def _build_effort_options(levels: list[str]) -> list[ThinkingOption]:
+     """Build effort-based thinking options."""
+     return [ThinkingOption(label=level, value=f"effort:{level}") for level in levels]
+
+
+ def _build_budget_options() -> list[ThinkingOption]:
+     """Build budget-based thinking options."""
+     return [ThinkingOption(label=label, value=f"budget:{tokens or 0}") for label, tokens in ANTHROPIC_LEVELS]
+
+
+ def _get_current_effort_value(thinking: llm_param.Thinking | None) -> str | None:
+     """Get current value for effort-based thinking."""
+     if thinking and thinking.reasoning_effort:
+         return f"effort:{thinking.reasoning_effort}"
+     return None
+
+
+ def _get_current_budget_value(thinking: llm_param.Thinking | None) -> str | None:
+     """Get current value for budget-based thinking."""
+     if thinking:
+         if thinking.type == "disabled":
+             return "budget:0"
+         if thinking.budget_tokens:
+             return f"budget:{thinking.budget_tokens}"
+     return None
+
+
+ def get_thinking_picker_data(config: llm_param.LLMConfigParameter) -> ThinkingPickerData | None:
+     """Get thinking picker data based on LLM config.
+
+     Returns:
+         ThinkingPickerData with options and current value, or None if protocol doesn't support thinking.
+     """
+     protocol = config.protocol
+     model_name = config.model
+     thinking = config.thinking
+
+     if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
+         levels = get_levels_for_responses(model_name)
+         return ThinkingPickerData(
+             options=_build_effort_options(levels),
+             message="Select reasoning effort:",
+             current_value=_get_current_effort_value(thinking),
+         )
+
+     if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
+         return ThinkingPickerData(
+             options=_build_budget_options(),
+             message="Select thinking level:",
+             current_value=_get_current_budget_value(thinking),
+         )
+
+     if protocol == llm_param.LLMClientProtocol.OPENROUTER:
+         if is_openrouter_model_with_reasoning_effort(model_name):
+             levels = get_levels_for_responses(model_name)
+             return ThinkingPickerData(
+                 options=_build_effort_options(levels),
+                 message="Select reasoning effort:",
+                 current_value=_get_current_effort_value(thinking),
+             )
+         return ThinkingPickerData(
+             options=_build_budget_options(),
+             message="Select thinking level:",
+             current_value=_get_current_budget_value(thinking),
+         )
+
+     if protocol == llm_param.LLMClientProtocol.OPENAI:
+         return ThinkingPickerData(
+             options=_build_budget_options(),
+             message="Select thinking level:",
+             current_value=_get_current_budget_value(thinking),
+         )
+
+     return None
+
+
+ def parse_thinking_value(value: str) -> llm_param.Thinking | None:
+     """Parse a thinking value string into a Thinking object.
+
+     Args:
+         value: Encoded value string (e.g., "effort:low", "budget:2048").
+
+     Returns:
+         Thinking object, or None if invalid format.
+     """
+     if value.startswith("effort:"):
+         effort = value[7:]
+         return llm_param.Thinking(reasoning_effort=effort)  # type: ignore[arg-type]
+
+     if value.startswith("budget:"):
+         budget = int(value[7:])
+         if budget == 0:
+             return llm_param.Thinking(type="disabled", budget_tokens=0)
+         return llm_param.Thinking(type="enabled", budget_tokens=budget)
+
+     return None
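
Note: the new module keeps picker data pure, so any front end can render it. A minimal sketch of the intended use, assuming the file ships as klaude_code/command/thinking_levels.py (the diff viewer omits file paths):

# Illustrative only; the import path is an assumption, the functions are from the diff above.
from klaude_code.command.thinking_levels import get_levels_for_responses, parse_thinking_value

levels = get_levels_for_responses("gpt-5.1-codex-max")   # ["medium", "high", "xhigh"]
high = parse_thinking_value(f"effort:{levels[-1]}")      # Thinking(reasoning_effort="xhigh")
off = parse_thinking_value("budget:0")                   # Thinking(type="disabled", budget_tokens=0)
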
@@ -13,18 +13,13 @@ from collections.abc import Callable
  from dataclasses import dataclass
  from pathlib import Path

- from klaude_code.command import dispatch_command
- from klaude_code.command.thinking_cmd import (
-     format_current_thinking,
-     select_thinking_for_protocol,
-     should_auto_trigger_thinking,
- )
  from klaude_code.config import load_config
  from klaude_code.core.agent import Agent, DefaultModelProfileProvider, ModelProfileProvider
  from klaude_code.core.manager import LLMClients, SubAgentManager
  from klaude_code.core.tool import current_run_subtask_callback
  from klaude_code.llm.registry import create_llm_client
  from klaude_code.protocol import commands, events, model, op
+ from klaude_code.protocol.llm_param import Thinking
  from klaude_code.protocol.op_handler import OperationHandler
  from klaude_code.protocol.sub_agent import SubAgentResult
  from klaude_code.session.export import build_export_html, get_default_export_path
@@ -181,7 +176,11 @@ class ExecutorContext:
          await self._ensure_agent(operation.session_id)

      async def handle_user_input(self, operation: op.UserInputOperation) -> None:
-         """Handle a user input operation by dispatching it into operations."""
+         """Handle a user input operation.
+
+         Core should not parse slash commands. The UI/CLI layer is responsible for
+         turning raw user input into one or more operations.
+         """

          if operation.session_id is None:
              raise ValueError("session_id cannot be None")
@@ -190,33 +189,18 @@ class ExecutorContext:
          agent = await self._ensure_agent(session_id)
          user_input = operation.input

-         # Emit the original user input to UI (even if the persisted text differs).
          await self.emit_event(
              events.UserMessageEvent(content=user_input.text, session_id=session_id, images=user_input.images)
          )
+         agent.session.append_history([model.UserMessageItem(content=user_input.text, images=user_input.images)])

-         result = await dispatch_command(user_input, agent, submission_id=operation.id)
-         ops: list[op.Operation] = list(result.operations or [])
-
-         run_ops = [candidate for candidate in ops if isinstance(candidate, op.RunAgentOperation)]
-         if len(run_ops) > 1:
-             raise ValueError("Multiple RunAgentOperation results are not supported")
-
-         persisted_user_input = run_ops[0].input if run_ops else user_input
-
-         if result.persist_user_input:
-             agent.session.append_history(
-                 [model.UserMessageItem(content=persisted_user_input.text, images=persisted_user_input.images)]
+         await self.handle_run_agent(
+             op.RunAgentOperation(
+                 id=operation.id,
+                 session_id=session_id,
+                 input=user_input,
              )
-
-         if result.events:
-             for evt in result.events:
-                 if result.persist_events and isinstance(evt, events.DeveloperMessageEvent):
-                     agent.session.append_history([evt.item])
-                 await self.emit_event(evt)
-
-         for operation_item in ops:
-             await operation_item.execute(handler=self)
+         )

      async def handle_run_agent(self, operation: op.RunAgentOperation) -> None:
          agent = await self._ensure_agent(operation.session_id)
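
With dispatch_command removed, the executor treats every UserInputOperation as a plain message: echo it, persist it, run the agent. Slash-command parsing now has to happen before core is called; a hedged sketch of that caller-side flow (build_operations_for_command and payload_from are hypothetical UI helpers, not part of this diff):

async def submit(text: str, session_id: str, executor) -> None:
    # The CLI decides what the input means; core never sees a raw slash command.
    if text.startswith("/"):
        operations = build_operations_for_command(text, session_id)
    else:
        operations = [op.UserInputOperation(session_id=session_id, input=payload_from(text))]
    for operation in operations:
        await operation.execute(handler=executor)
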
@@ -243,56 +227,62 @@ class ExecutorContext:
          config.main_model = operation.model_name
          await config.save()

-         default_note = " (saved as default)" if operation.save_as_default else ""
-         developer_item = model.DeveloperMessageItem(
-             content=f"Switched to: {llm_config.model}{default_note}",
-             command_output=model.CommandOutput(command_name=commands.CommandName.MODEL),
-         )
-         agent.session.append_history([developer_item])
-
-         await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+         if operation.emit_switch_message:
+             default_note = " (saved as default)" if operation.save_as_default else ""
+             developer_item = model.DeveloperMessageItem(
+                 content=f"Switched to: {llm_config.model}{default_note}",
+                 command_output=model.CommandOutput(command_name=commands.CommandName.MODEL),
+             )
+             agent.session.append_history([developer_item])
+             await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))

          if self._on_model_change is not None:
              self._on_model_change(llm_client.model_name)

-         if should_auto_trigger_thinking(llm_config.model):
-             thinking_op = op.ChangeThinkingOperation(session_id=operation.session_id)
-             await thinking_op.execute(handler=self)
-             # WelcomeEvent is already handled by the thinking change
-         else:
+         if operation.emit_welcome_event:
              await self.emit_event(events.WelcomeEvent(llm_config=llm_config, work_dir=str(agent.session.work_dir)))

      async def handle_change_thinking(self, operation: op.ChangeThinkingOperation) -> None:
-         """Handle a change thinking operation by prompting user to select thinking level."""
+         """Handle a change thinking operation.
+
+         Interactive thinking selection must happen in the UI/CLI layer. Core only
+         applies a concrete thinking configuration.
+         """
          agent = await self._ensure_agent(operation.session_id)
-         if not agent.profile:
-             return

          config = agent.profile.llm_client.get_llm_config()
-         current = format_current_thinking(config)
-
-         new_thinking = await select_thinking_for_protocol(config)

-         if new_thinking is None:
+         def _format_thinking_for_display(thinking: Thinking | None) -> str:
+             if thinking is None:
+                 return "not configured"
+             if thinking.reasoning_effort:
+                 return f"reasoning_effort={thinking.reasoning_effort}"
+             if thinking.type == "disabled":
+                 return "off"
+             if thinking.type == "enabled":
+                 if thinking.budget_tokens is None:
+                     return "enabled"
+                 return f"enabled (budget_tokens={thinking.budget_tokens})"
+             return "not set"
+
+         if operation.thinking is None:
+             raise ValueError("thinking must be provided; interactive selection belongs to UI")
+
+         current = _format_thinking_for_display(config.thinking)
+         config.thinking = operation.thinking
+         agent.session.model_thinking = operation.thinking
+         new_status = _format_thinking_for_display(config.thinking)
+
+         if operation.emit_switch_message:
              developer_item = model.DeveloperMessageItem(
-                 content="(thinking unchanged)",
+                 content=f"Thinking changed: {current} -> {new_status}",
                  command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
              )
+             agent.session.append_history([developer_item])
              await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
-             return
-
-         config.thinking = new_thinking
-         agent.session.model_thinking = new_thinking
-         new_status = format_current_thinking(config)
-
-         developer_item = model.DeveloperMessageItem(
-             content=f"Thinking changed: {current} -> {new_status}",
-             command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
-         )
-         agent.session.append_history([developer_item])

-         await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+         if operation.emit_welcome_event:
+             await self.emit_event(events.WelcomeEvent(work_dir=str(agent.session.work_dir), llm_config=config))

      async def handle_clear_session(self, operation: op.ClearSessionOperation) -> None:
          agent = await self._ensure_agent(operation.session_id)
@@ -1,6 +1,17 @@
+ from dataclasses import dataclass
  from enum import Enum


+ @dataclass(frozen=True, slots=True)
+ class CommandInfo:
+     """Lightweight command metadata for UI purposes (no logic)."""
+
+     name: str
+     summary: str
+     support_addition_params: bool = False
+     placeholder: str = ""
+
+
  class CommandName(str, Enum):
      INIT = "init"
      DEBUG = "debug"
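
CommandInfo carries only display metadata, so a UI can build its command palette without importing command logic. An illustrative entry (the field values are assumptions, not taken from the package):

MODEL_COMMAND = CommandInfo(
    name=CommandName.MODEL.value,     # CommandName.MODEL is referenced elsewhere in this diff
    summary="Switch the main model",  # assumed wording
    support_addition_params=True,
    placeholder="model name",
)
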
@@ -13,6 +13,7 @@ from uuid import uuid4

  from pydantic import BaseModel, Field

+ from klaude_code.protocol.llm_param import Thinking
  from klaude_code.protocol.model import UserInputPayload

  if TYPE_CHECKING:
@@ -75,6 +76,17 @@ class ChangeModelOperation(Operation):
      session_id: str
      model_name: str
      save_as_default: bool = False
+     # When True, the executor must not auto-trigger an interactive thinking selector.
+     # This is required for in-prompt model switching where the terminal is already
+     # controlled by a prompt_toolkit PromptSession.
+     defer_thinking_selection: bool = False
+     # When False, do not emit WelcomeEvent (which renders a banner/panel).
+     # This is useful for in-prompt model switching where extra output is noisy.
+     emit_welcome_event: bool = True
+
+     # When False, do not emit the "Switched to: ..." developer message.
+     # This is useful for in-prompt model switching where extra output is noisy.
+     emit_switch_message: bool = True

      async def execute(self, handler: OperationHandler) -> None:
          await handler.handle_change_model(self)
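
Together the three flags allow a fully quiet in-prompt model switch. A sketch (session_id and executor are assumed from surrounding context):

await op.ChangeModelOperation(
    session_id=session_id,
    model_name="gpt-5.2",           # illustrative model name
    defer_thinking_selection=True,  # the prompt session owns the terminal
    emit_welcome_event=False,       # no banner
    emit_switch_message=False,      # no "Switched to: ..." message
).execute(handler=executor)
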
@@ -85,6 +97,9 @@ class ChangeThinkingOperation(Operation):

      type: OperationType = OperationType.CHANGE_THINKING
      session_id: str
+     thinking: Thinking | None = None
+     emit_welcome_event: bool = True
+     emit_switch_message: bool = True

      async def execute(self, handler: OperationHandler) -> None:
          await handler.handle_change_thinking(self)
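
Since the executor now raises when thinking is None, the UI resolves the selection first and dispatches a concrete value. A sketch reusing parse_thinking_value from the new thinking-levels module:

thinking = parse_thinking_value("effort:high")  # -> Thinking(reasoning_effort="high")
await op.ChangeThinkingOperation(
    session_id=session_id,  # assumed from surrounding context
    thinking=thinking,
    emit_welcome_event=False,
).execute(handler=executor)
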
@@ -1,4 +1,4 @@
- from .selector import resume_select_session
+ from .selector import SessionSelectOption, build_session_select_options
  from .session import Session

- __all__ = ["Session", "resume_select_session"]
+ __all__ = ["Session", "SessionSelectOption", "build_session_select_options"]
@@ -1,7 +1,5 @@
  import time
-
- from klaude_code.trace import log, log_debug
- from klaude_code.ui.terminal.selector import SelectItem, select_one
+ from dataclasses import dataclass

  from .session import Session

@@ -30,69 +28,43 @@ def _relative_time(ts: float) -> str:
      return f"{months} month{'s' if months != 1 else ''} ago"


- def resume_select_session() -> str | None:
+ @dataclass(frozen=True, slots=True)
+ class SessionSelectOption:
+     """Option data for session selection UI."""
+
+     session_id: str
+     first_user_message: str
+     messages_count: str
+     relative_time: str
+     model_name: str
+
+
+ def build_session_select_options() -> list[SessionSelectOption]:
+     """Build session selection options data.
+
+     Returns:
+         List of SessionSelectOption, or empty list if no sessions exist.
+     """
      sessions = Session.list_sessions()
      if not sessions:
-         log("No sessions found for this project.")
-         return None
-
-     try:
-         from prompt_toolkit.styles import Style
+         return []

-         items: list[SelectItem[str]] = []
-         for s in sessions:
-             first_msg = s.first_user_message or "N/A"
-             first_msg = first_msg.strip().replace("\n", " ")
+     options: list[SessionSelectOption] = []
+     for s in sessions:
+         first_msg = s.first_user_message or "N/A"
+         first_msg = first_msg.strip().replace("\n", " ")

-             msg_count = "N/A" if s.messages_count == -1 else f"{s.messages_count} messages"
-             model = s.model_name or "N/A"
+         msg_count = "N/A" if s.messages_count == -1 else f"{s.messages_count} messages"
+         model = s.model_name or "N/A"

-             title = [
-                 ("class:msg", f"{first_msg}\n"),
-                 ("class:meta", f" {msg_count} · {_relative_time(s.updated_at)} · {model} · {s.id}\n\n"),
-             ]
-             items.append(
-                 SelectItem(
-                     title=title,
-                     value=str(s.id),
-                     search_text=f"{first_msg} {model} {s.id}",
-                 )
+         options.append(
+             SessionSelectOption(
+                 session_id=str(s.id),
+                 first_user_message=first_msg,
+                 messages_count=msg_count,
+                 relative_time=_relative_time(s.updated_at),
+                 model_name=model,
              )
-
-         return select_one(
-             message="Select a session to resume:",
-             items=items,
-             pointer="→",
-             style=Style(
-                 [
-                     ("msg", ""),
-                     ("meta", "fg:ansibrightblack"),
-                     ("pointer", "bold fg:ansigreen"),
-                     ("highlighted", "fg:ansigreen"),
-                     ("search_prefix", "fg:ansibrightblack"),
-                     ("search_success", "noinherit fg:ansigreen"),
-                     ("search_none", "noinherit fg:ansired"),
-                     ("question", "bold"),
-                     ("text", ""),
-                 ]
-             ),
         )
-     except Exception as e:
-         log_debug(f"Failed to use prompt_toolkit for session select, {e}")

-     for i, s in enumerate(sessions, 1):
-         first_msg = (s.first_user_message or "N/A").strip().replace("\n", " ")
-         if len(first_msg) > 60:
-             first_msg = first_msg[:59] + "…"
-         msg_count = "N/A" if s.messages_count == -1 else f"{s.messages_count} msgs"
-         model = s.model_name or "N/A"
-         print(f"{i}. {first_msg}")
-         print(f" {_relative_time(s.updated_at)} · {msg_count} · {model}")
-     try:
-         raw = input("Select a session number: ").strip()
-         idx = int(raw)
-         if 1 <= idx <= len(sessions):
-             return str(sessions[idx - 1].id)
-     except (ValueError, EOFError):
-         return None
-     return None
+     return options
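
The selector module now returns data and leaves rendering to callers. A minimal consumer (the formatting here is illustrative, not the package's renderer):

from klaude_code.session import build_session_select_options

for opt in build_session_select_options():
    print(opt.first_user_message)
    print(f"  {opt.messages_count} · {opt.relative_time} · {opt.model_name} · {opt.session_id}")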