yaicli 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yaicli/printer.py CHANGED
@@ -1,176 +1,131 @@
1
1
  import time
2
- import traceback
3
- from typing import (
4
- Any,
5
- Dict,
6
- Iterator,
7
- List,
8
- Optional,
9
- Tuple,
10
- )
11
-
12
- from rich.console import Console, Group, RenderableType
13
- from rich.live import Live
2
+ from dataclasses import dataclass, field
3
+ from typing import TYPE_CHECKING, Iterator, List, Tuple, Union
14
4
 
15
- from yaicli.console import get_console
16
- from yaicli.const import EventTypeEnum
17
- from yaicli.render import JustifyMarkdown as Markdown
18
- from yaicli.render import plain_formatter
5
+ from rich.console import Group, RenderableType
6
+ from rich.live import Live
19
7
 
8
+ from .client import RefreshLive
9
+ from .config import Config, get_config
10
+ from .console import YaiConsole, get_console
11
+ from .schemas import ChatMessage
12
+ from .render import Markdown, plain_formatter
20
13
 
21
- def cursor_animation() -> Iterator[str]:
22
- """Generate a cursor animation for the console."""
23
- cursors = ["_", " "]
24
- while True:
25
- # Use current time to determine cursor state (changes every 0.5 seconds)
26
- current_time = time.time()
27
- # Alternate between cursors based on time
28
- yield cursors[int(current_time * 2) % 2]
14
+ if TYPE_CHECKING:
15
+ from .schemas import LLMResponse
29
16
 
30
17
 
18
+ @dataclass
31
19
  class Printer:
32
- """Handles printing responses to the console, including stream processing."""
33
-
34
- _REASONING_PREFIX = "> "
35
- _CURSOR_ANIMATION_SLEEP = 0.005
36
-
37
- def __init__(
38
- self,
39
- config: Dict[str, Any],
40
- console: Console,
41
- verbose: bool = False,
42
- markdown: bool = True,
43
- reasoning_markdown: Optional[bool] = None,
44
- content_markdown: Optional[bool] = None,
45
- ):
46
- """Initialize the Printer class.
47
-
48
- Args:
49
- config (Dict[str, Any]): The configuration dictionary.
50
- console (Console): The console object.
51
- verbose (bool): Whether to print verbose output.
52
- markdown (bool): Whether to use Markdown formatting for all output (legacy).
53
- reasoning_markdown (Optional[bool]): Whether to use Markdown for reasoning sections.
54
- content_markdown (Optional[bool]): Whether to use Markdown for content sections.
55
- """
56
- self.config = config
57
- self.console = console or get_console()
58
- self.verbose = verbose
59
- self.code_theme = config["CODE_THEME"]
60
- self.in_reasoning: bool = False
61
- # Print reasoning content or not
62
- self.show_reasoning = config["SHOW_REASONING"]
63
-
64
- # Use explicit settings if provided, otherwise fall back to the global markdown setting
65
- self.reasoning_markdown = reasoning_markdown if reasoning_markdown is not None else markdown
66
- self.content_markdown = content_markdown if content_markdown is not None else markdown
67
-
68
- # Set formatters for reasoning and content
69
- self.reasoning_formatter = Markdown if self.reasoning_markdown else plain_formatter
20
+ console: YaiConsole = field(default_factory=get_console)
21
+ config: Config = field(default_factory=get_config)
22
+ content_markdown: bool = True
23
+
24
+ _REASONING_PREFIX: str = "> "
25
+ _UPDATE_INTERVAL: float = 0.01
26
+
27
+ def __post_init__(self):
28
+ self.code_theme: str = self.config["CODE_THEME"]
29
+ self.show_reasoning: bool = self.config["SHOW_REASONING"]
30
+ # Set formatter for reasoning and content
31
+ self.reasoning_formatter = Markdown
70
32
  self.content_formatter = Markdown if self.content_markdown else plain_formatter
33
+ # Track if we're currently processing reasoning content
34
+ self.in_reasoning: bool = False
71
35
 
72
36
  def _reset_state(self) -> None:
73
- """Resets the printer state for a new stream."""
37
+ """Reset printer state for a new stream."""
74
38
  self.in_reasoning = False
75
39
 
76
- def _process_reasoning_chunk(self, chunk: str, content: str, reasoning: str) -> Tuple[str, str]:
77
- """Adds a reasoning chunk to the reasoning text.
78
- This method handles the processing of reasoning chunks, and update the reasoning state
79
- when <think> tag is closed.
40
+ def _check_and_update_think_tags(self, content: str, reasoning: str) -> Tuple[str, str]:
41
+ """Check for <think> tags in the accumulated content and reasoning.
80
42
 
81
- Args:
82
- chunk (str): The reasoning chunk to process.
83
- content (str): The current content text.
84
- reasoning (str): The current reasoning text.
85
-
86
- Returns:
87
- Tuple[str, str]: The updated content text and reasoning text.
88
- """
89
- if not self.in_reasoning:
90
- self.in_reasoning = True
91
- reasoning = ""
92
-
93
- tmp = chunk.replace("\n", f"\n{self._REASONING_PREFIX}")
94
- tmp_reasoning = reasoning + tmp
95
-
96
- reasoning += chunk
97
- if "</think>" in tmp_reasoning:
98
- self.in_reasoning = False
99
- reasoning, content = reasoning.split("</think>", maxsplit=1)
100
- return content, reasoning
101
-
102
- def _process_content_chunk(self, chunk: str, content: str, reasoning: str) -> Tuple[str, str]:
103
- """Adds a content chunk to the content text.
104
- This method handles the processing of content chunks, and update the reasoning state
105
- when <think> tag is opened.
43
+ This function checks the entire accumulated text for <think> tags
44
+ and updates state accordingly.
106
45
 
107
46
  Args:
108
- chunk (str): The content chunk to process.
109
- content (str): The current content text.
110
- reasoning (str): The current reasoning text.
47
+ content: Current accumulated content text
48
+ reasoning: Current accumulated reasoning text
111
49
 
112
50
  Returns:
113
- Tuple[str, str]: The updated content text and reasoning text.
51
+ Updated content and reasoning after tag processing
114
52
  """
115
- if content == "":
116
- chunk = chunk.lstrip() # Remove leading whitespace from first chunk
53
+ # First, check if we have a <think> opener in content
54
+ if "<think>" in content and not self.in_reasoning:
55
+ parts = content.split("<think>", 1)
56
+ new_content = parts[0]
57
+ new_reasoning = parts[1]
58
+ self.in_reasoning = True
117
59
 
118
- if self.in_reasoning:
60
+ # Check if the new reasoning has a </think> closer
61
+ if "</think>" in new_reasoning:
62
+ closer_parts = new_reasoning.split("</think>", 1)
63
+ reasoning += closer_parts[0]
64
+ new_content += closer_parts[1]
65
+ self.in_reasoning = False
66
+ return new_content, reasoning
67
+ else:
68
+ # No closer yet
69
+ reasoning += new_reasoning
70
+ return new_content, reasoning
71
+
72
+ # Check if we have a </think> closer in reasoning
73
+ if "</think>" in reasoning and self.in_reasoning:
74
+ parts = reasoning.split("</think>", 1)
75
+ new_reasoning = parts[0]
76
+ content += parts[1]
119
77
  self.in_reasoning = False
120
- content += chunk
121
-
122
- if content.startswith("<think>"):
123
- # Remove <think> tag and leading whitespace
124
- self.in_reasoning = True
125
- reasoning = content[7:].lstrip()
126
- content = "" # Content starts after the initial <think> tag
78
+ return content, new_reasoning
127
79
 
128
80
  return content, reasoning
129
81
 
130
- def _handle_event(self, event: Dict[str, Any], content: str, reasoning: str) -> Tuple[str, str]:
131
- """Process a single stream event and return the updated content and reasoning.
82
+ def _process_chunk(self, chunk_content: str, chunk_reasoning: str, content: str, reasoning: str) -> Tuple[str, str]:
83
+ """Process a single chunk and update content and reasoning.
132
84
 
133
85
  Args:
134
- event (Dict[str, Any]): The stream event to process.
135
- content (str): The current content text (non-reasoning).
136
- reasoning (str): The current reasoning text.
86
+ chunk_content: Content from the current chunk
87
+ chunk_reasoning: Reasoning from the current chunk
88
+ content: Current accumulated content
89
+ reasoning: Current accumulated reasoning
90
+
137
91
  Returns:
138
- Tuple[str, str]: The updated content text and reasoning text.
92
+ Updated content and reasoning
139
93
  """
140
- event_type = event.get("type")
141
- chunk = event.get("chunk")
142
-
143
- if event_type == EventTypeEnum.ERROR and self.verbose:
144
- self.console.print(f"Stream error: {event.get('message')}", style="dim")
145
- return content, reasoning
146
-
147
- # Handle explicit reasoning end event
148
- if event_type == EventTypeEnum.REASONING_END:
94
+ # Process reasoning field first (if present)
95
+ if chunk_reasoning:
149
96
  if self.in_reasoning:
150
- self.in_reasoning = False
151
- return content, reasoning
97
+ # Already in reasoning mode, append to reasoning
98
+ reasoning += chunk_reasoning
99
+ else:
100
+ # Force reasoning mode for explicit reasoning field
101
+ self.in_reasoning = True
102
+ reasoning += chunk_reasoning
152
103
 
153
- if event_type in (EventTypeEnum.REASONING, EventTypeEnum.CONTENT) and chunk:
154
- if event_type == EventTypeEnum.REASONING or self.in_reasoning:
155
- return self._process_reasoning_chunk(str(chunk), content, reasoning)
156
- return self._process_content_chunk(str(chunk), content, reasoning)
104
+ # Then process content field (if present)
105
+ if chunk_content:
106
+ if self.in_reasoning:
107
+ # In reasoning mode, append to reasoning
108
+ reasoning += chunk_content
109
+ else:
110
+ # Normal content mode
111
+ content += chunk_content
157
112
 
158
- return content, reasoning
113
+ # Check for any <think> tags in the updated content/reasoning
114
+ return self._check_and_update_think_tags(content, reasoning)
159
115
 
160
116
  def _format_display_text(self, content: str, reasoning: str) -> RenderableType:
161
117
  """Format the text for display, combining content and reasoning if needed.
162
118
 
163
119
  Args:
164
- content (str): The content text.
165
- reasoning (str): The reasoning text.
120
+ content: The content text.
121
+ reasoning: The reasoning text.
166
122
 
167
123
  Returns:
168
- RenderableType: The formatted text ready for display as a Rich renderable.
124
+ The formatted text ready for display as a Rich renderable.
169
125
  """
170
126
  # Create list of display elements to avoid type issues with concatenation
171
127
  display_elements: List[RenderableType] = []
172
128
 
173
- reasoning = reasoning.strip()
174
129
  # Format reasoning with proper formatting if it exists
175
130
  if reasoning and self.show_reasoning:
176
131
  raw_reasoning = reasoning.replace("\n", f"\n{self._REASONING_PREFIX}")
@@ -182,7 +137,6 @@ class Printer:
182
137
  formatted_reasoning = self.reasoning_formatter(reasoning_header + raw_reasoning, code_theme=self.code_theme)
183
138
  display_elements.append(formatted_reasoning)
184
139
 
185
- content = content.strip()
186
140
  # Format content if it exists
187
141
  if content:
188
142
  formatted_content = self.content_formatter(content, code_theme=self.code_theme)
@@ -199,102 +153,68 @@ class Printer:
199
153
  # Use Rich Group to combine multiple renderables
200
154
  return Group(*display_elements)
201
155
 
202
- def _update_live_display(self, live: Live, content: str, reasoning: str, cursor: Iterator[str]) -> None:
203
- """Update live display content and execute cursor animation
204
- Sleep for a short duration to control the cursor animation speed.
205
-
206
- Args:
207
- live (Live): The live display object.
208
- content (str): The current content text.
209
- reasoning (str): The current reasoning text.
210
- cursor (Iterator[str]): The cursor animation iterator.
211
- """
156
+ def display_normal(
157
+ self, content_iterator: Iterator[Union["LLMResponse", RefreshLive]], messages: list["ChatMessage"]
158
+ ) -> tuple[str, str]:
159
+ """Process and display non-stream LLMContent, including reasoning and content parts."""
160
+ self._reset_state()
161
+ full_content = full_reasoning = ""
212
162
 
213
- cursor_char = next(cursor)
163
+ for chunk in content_iterator:
164
+ if not isinstance(chunk, LLMResponse):
165
+ continue
214
166
 
215
- # Handle cursor placement based on current state
216
- if self.in_reasoning and self.show_reasoning:
217
- # For reasoning, add cursor in plaintext to reasoning section
218
- if reasoning:
219
- if reasoning.endswith("\n"):
220
- cursor_line = f"\n{self._REASONING_PREFIX}{cursor_char}"
221
- else:
222
- cursor_line = cursor_char
167
+ # Process chunk and update content/reasoning
168
+ full_content, full_reasoning = self._process_chunk(
169
+ chunk.content or "", chunk.reasoning or "", full_content, full_reasoning
170
+ )
223
171
 
224
- # Re-format with cursor added
225
- raw_reasoning = reasoning + cursor_line.replace(self._REASONING_PREFIX, "")
226
- formatted_display = self._format_display_text(content, raw_reasoning)
227
- else:
228
- # If reasoning just started with no content yet
229
- reasoning_header = f"\nThinking:\n{self._REASONING_PREFIX}{cursor_char}"
230
- formatted_reasoning = self.reasoning_formatter(reasoning_header, code_theme=self.code_theme)
231
- formatted_display = Group(formatted_reasoning)
232
- else:
233
- # For content, add cursor to content section
234
- formatted_content_with_cursor = content + cursor_char
235
- formatted_display = self._format_display_text(formatted_content_with_cursor, reasoning)
236
-
237
- live.update(formatted_display)
238
- time.sleep(self._CURSOR_ANIMATION_SLEEP)
172
+ # Display reasoning
173
+ if self.show_reasoning and full_reasoning:
174
+ reasoning = full_reasoning.replace("\n", f"\n{self._REASONING_PREFIX}")
175
+ self.console.print("Thinking:")
176
+ self.console.print(self.reasoning_formatter(reasoning))
239
177
 
240
- def display_stream(
241
- self, stream_iterator: Iterator[Dict[str, Any]], with_assistant_prefix: bool = True
242
- ) -> Tuple[Optional[str], Optional[str]]:
243
- """Display streaming response content
244
- Handle stream events and update the live display accordingly.
245
- This method separates content and reasoning blocks for display and further processing.
178
+ # Display content
179
+ if full_content:
180
+ self.console.print()
181
+ self.console.print(self.content_formatter(full_content))
246
182
 
247
- Args:
248
- stream_iterator (Iterator[Dict[str, Any]]): The stream iterator to process.
249
- with_assistant_prefix (bool): Whether to display the "Assistant:" prefix.
250
- Returns:
251
- Tuple[Optional[str], Optional[str]]: The final content and reasoning texts if successful, None otherwise.
252
- """
253
- if with_assistant_prefix:
254
- self.console.print("Assistant:", style="bold green")
255
- self._reset_state() # Reset state for the new stream
256
- content = ""
257
- reasoning = ""
258
- cursor = cursor_animation()
259
-
260
- with Live(console=self.console) as live:
261
- try:
262
- for event in stream_iterator:
263
- content, reasoning = self._handle_event(event, content, reasoning)
264
-
265
- if event.get("type") in (
266
- EventTypeEnum.CONTENT,
267
- EventTypeEnum.REASONING,
268
- EventTypeEnum.REASONING_END,
269
- ):
270
- self._update_live_display(live, content, reasoning, cursor)
271
-
272
- # Remove cursor and finalize display
273
- live.update(self._format_display_text(content, reasoning))
274
- return content, reasoning
275
-
276
- except Exception as e:
277
- self.console.print(f"An error occurred during stream display: {e}", style="red")
278
- if self.verbose:
279
- traceback.print_exc()
280
- return None, None
183
+ messages.append(ChatMessage(role="assistant", content=full_content))
281
184
 
282
- def display_normal(
283
- self, content: Optional[str], reasoning: Optional[str] = None, with_assistant_prefix: bool = True
284
- ) -> None:
285
- """Display a complete, non-streamed response.
185
+ return full_content, full_reasoning
286
186
 
287
- Args:
288
- content (Optional[str]): The main content to display.
289
- reasoning (Optional[str]): The reasoning content to display.
290
- with_assistant_prefix (bool): Whether to display the "Assistant:" prefix.
291
- """
292
- if with_assistant_prefix:
293
- self.console.print("Assistant:", style="bold green")
294
- if content or reasoning:
295
- # Use the existing _format_display_text method
296
- formatted_display = self._format_display_text(content or "", reasoning or "")
297
- self.console.print(formatted_display)
298
- self.console.print() # Add a newline for spacing
299
- else:
300
- self.console.print("Assistant did not provide any content.", style="yellow")
187
+ def display_stream(
188
+ self, stream_iterator: Iterator[Union["LLMResponse", RefreshLive]], messages: list["ChatMessage"]
189
+ ) -> tuple[str, str]:
190
+ """Process and display LLMContent stream, including reasoning and content parts."""
191
+ self._reset_state()
192
+ full_content = full_reasoning = ""
193
+ live = Live(console=self.console)
194
+ live.start()
195
+
196
+ for chunk in stream_iterator:
197
+ if isinstance(chunk, RefreshLive):
198
+ # Refresh live display when in next completion
199
+ live.stop()
200
+ messages.append(ChatMessage(role="assistant", content=full_content))
201
+ live = Live(console=self.console)
202
+ live.start()
203
+ # Initialize full_content and full_reasoning for the next completion
204
+ full_content = full_reasoning = ""
205
+ self._reset_state()
206
+ continue
207
+
208
+ # Process chunk and update content/reasoning
209
+ full_content, full_reasoning = self._process_chunk(
210
+ chunk.content or "", chunk.reasoning or "", full_content, full_reasoning
211
+ )
212
+
213
+ # Update display
214
+ formatted_display = self._format_display_text(full_content, full_reasoning)
215
+ live.update(formatted_display)
216
+ time.sleep(self._UPDATE_INTERVAL)
217
+
218
+ live.stop()
219
+ messages.append(ChatMessage(role="assistant", content=full_content))
220
+ return full_content, full_reasoning
yaicli/render.py CHANGED
@@ -2,7 +2,7 @@ from typing import Any
2
2
 
3
3
  from rich.markdown import Markdown
4
4
 
5
- from yaicli.config import cfg
5
+ from .config import cfg
6
6
 
7
7
 
8
8
  class JustifyMarkdown(Markdown):
yaicli/role.py ADDED
@@ -0,0 +1,231 @@
1
+ import json
2
+ from dataclasses import asdict, dataclass, field
3
+ from pathlib import Path
4
+ from typing import Any, Dict, TypeVar
5
+
6
+ import typer
7
+ from rich.table import Table
8
+
9
+ from .config import cfg
10
+ from .console import YaiConsole, get_console
11
+ from .const import DEFAULT_ROLES, ROLES_DIR, DefaultRoleNames
12
+ from .utils import detect_os, detect_shell, option_callback
13
+
14
+ T = TypeVar("T")
15
+
16
+
17
@dataclass
class Role:
    """A named system-prompt template for the assistant."""

    name: str
    prompt: str
    # Template variables substituted into the prompt; auto-detected when empty.
    variables: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self) -> None:
        """Validate fields and render the prompt template.

        Raises:
            ValueError: If ``name`` or ``prompt`` is empty or not a string.
        """
        if not self.name or not isinstance(self.name, str):
            raise ValueError("Role must have a non-empty name")

        # BUG FIX: the original message said "description" although the field
        # being validated is the prompt.
        if not self.prompt or not isinstance(self.prompt, str):
            raise ValueError("Role must have a non-empty prompt")

        # Default template variables are detected from the running system.
        if not self.variables:
            self.variables = {"_os": detect_os(cfg), "_shell": detect_shell(cfg)}
        # NOTE(review): the source's indentation is ambiguous here; formatting
        # is applied unconditionally, which is a no-op once the placeholders
        # have already been substituted — confirm against upstream.
        self.prompt = self.prompt.format(**self.variables)

    def to_dict(self) -> Dict[str, Any]:
        """Return the role as a plain dict (for JSON serialization)."""
        return asdict(self)
36
+
37
+
38
@dataclass
class RoleManager:
    """Loads, persists, and manages :class:`Role` definitions on disk."""

    roles: Dict[str, Role] = field(default_factory=dict)
    roles_dir: Path = ROLES_DIR
    # NOTE: evaluated once at class creation; every manager (and the
    # classmethod callbacks below) shares this console instance.
    console: YaiConsole = get_console()

    def __post_init__(self) -> None:
        # Defaults are loaded first so user files can overwrite them.
        self._ensure_roles_dir()
        self._load_default_roles()
        self._load_user_roles()

    def _ensure_roles_dir(self) -> None:
        """Ensure the roles directory exists and seed missing default role files."""
        self.roles_dir.mkdir(parents=True, exist_ok=True)
        for role in DEFAULT_ROLES.values():
            role_file = self.roles_dir / f"{role['name']}.json"
            if not role_file.exists():
                with open(role_file, "w") as f:
                    json.dump(role, f, indent=2)

    def _load_default_roles(self) -> None:
        """Load the built-in default roles into memory."""
        for name, role_dict in DEFAULT_ROLES.items():
            self.roles[name] = Role(**role_dict)

    def _load_user_roles(self) -> None:
        """Load user-defined roles; user files may overwrite defaults."""
        if not self.roles_dir.exists():
            return

        for filename in self.roles_dir.glob("*.json"):
            try:
                with open(filename, "r") as f:
                    role_dict = json.load(f)
                role = Role(**role_dict)
                self.roles[role.name] = role
            except (json.JSONDecodeError, KeyError, TypeError) as e:
                # BUG FIX: the original printed a literal "(unknown)" instead
                # of the offending file path.
                self.console.print(f"Error loading role from {filename}: {e}", style="red")

    def get_role(self, name: str) -> Role:
        """Return the role named ``name``.

        Raises:
            ValueError: If no role with that name exists.
        """
        if name not in self.roles:
            raise ValueError(f"Role '{name}' does not exist.")
        return self.roles[name]

    def create_role(self, name: str, description: str) -> Role:
        """Create a new role, persist it to disk, and return it."""
        role = Role(name=name, prompt=description)
        self.roles[name] = role

        # Save to file
        role_path = self.roles_dir / f"{name}.json"
        with open(role_path, "w") as f:
            json.dump(role.to_dict(), f, indent=2)

        return role

    def delete_role(self, name: str) -> bool:
        """Delete a role from disk and memory; return True if it was loaded."""
        # Delete role file first (may exist even if not loaded)
        role_path = self.roles_dir / f"{name}.json"
        if role_path.exists():
            role_path.unlink()

        # Delete role from memory
        if name in self.roles:
            del self.roles[name]
            return True

        return False

    def list_roles(self) -> list:
        """Return info dicts for all available roles, sorted by id."""
        roles_list = []
        for role_id, role in sorted(self.roles.items()):
            roles_list.append(
                {
                    "id": role_id,
                    "name": role.name,
                    "prompt": role.prompt,
                    "is_default": role_id in DEFAULT_ROLES,
                    "filepath": self.roles_dir / f"{role_id}.json",
                }
            )
        return roles_list

    def print_roles(self) -> None:
        """Print a table of all available roles."""
        # BUG FIX: list_roles() returns dicts, but the original accessed
        # attribute-style fields (role.name) plus nonexistent
        # temperature/top_p fields, so this method always raised.
        table = Table("Name", "Description", "Default", title="Available Roles")

        for role in self.list_roles():
            table.add_row(
                role["name"],
                role["prompt"],
                "yes" if role["is_default"] else "no",
            )

        self.console.print(table)

    @classmethod
    @option_callback
    def print_list_option(cls, _: Any):
        """Print the list of roles.
        This method is a cli option callback.
        """
        table = Table(show_header=True, show_footer=False)
        table.add_column("Name", style="dim")
        table.add_column("Filepath", style="dim")
        # Sorted by modification time so recently edited roles appear last
        for file in sorted(cls.roles_dir.glob("*.json"), key=lambda f: f.stat().st_mtime):
            table.add_row(file.stem, str(file))
        cls.console.print(table)
        cls.console.print("Use `ai --show-role <name>` to view a role.", style="dim")

    @classmethod
    @option_callback
    def create_role_option(cls, value: str) -> None:
        """Create role option callback"""
        if not value:
            return

        role_manager = RoleManager()

        # Check if role name already exists
        if value in role_manager.roles:
            cls.console.print(f"Role '{value}' already exists.", style="red")
            return

        # Get role description
        description = typer.prompt("Enter role description")

        # Create role
        role = role_manager.create_role(value, description)
        cls.console.print(f"Created role: {role.name}", style="green")

    @classmethod
    @option_callback
    def delete_role_option(cls, value: str) -> None:
        """Delete role option callback"""
        if not value:
            return

        role_manager = RoleManager()

        # Check if role exists
        if value not in role_manager.roles:
            cls.console.print(f"Role '{value}' does not exist.", style="yellow")
            return

        # Delete role
        if role_manager.delete_role(value):
            cls.console.print(f"Deleted role: {value}", style="green")
        else:
            cls.console.print(f"Failed to delete role: {value}", style="red")

    @classmethod
    @option_callback
    def show_role_option(cls, value: str) -> None:
        """Show role option callback"""
        if not value:
            return

        role_manager = RoleManager()

        # BUG FIX: get_role() raises ValueError for unknown names, so the
        # original `if not role` check was unreachable.
        try:
            role = role_manager.get_role(value)
        except ValueError:
            cls.console.print(f"Role '{value}' does not exist.", style="red")
            return

        # Show role information
        cls.console.print(f"[bold]Name:[/bold] {role.name}")
        cls.console.print(f"[bold]Description:[/bold] {role.prompt}")

    @classmethod
    def check_id_ok(cls, value: str) -> str:
        """Check if role ID is valid option callback"""
        if not value:
            # Empty value is valid
            return value
        if value in DEFAULT_ROLES:
            # Built-in role is valid
            return value

        role_manager = RoleManager()

        if value not in role_manager.roles:
            cls.console.print(f"Role '{value}' does not exist. Using default role.", style="red")
            return DefaultRoleNames.DEFAULT

        return value
229
+
230
+
231
# Module-level singleton shared by the CLI entry points; constructing it
# creates the roles directory and loads default + user roles at import time.
role_mgr = RoleManager()