yaicli 0.0.19__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyproject.toml +10 -4
- yaicli/__init__.py +0 -0
- yaicli/api.py +324 -0
- yaicli/cli.py +332 -0
- yaicli/config.py +183 -0
- yaicli/const.py +119 -0
- yaicli/entry.py +95 -0
- yaicli/history.py +72 -0
- yaicli/printer.py +244 -0
- yaicli/utils.py +112 -0
- {yaicli-0.0.19.dist-info → yaicli-0.1.0.dist-info}/METADATA +280 -233
- yaicli-0.1.0.dist-info/RECORD +15 -0
- yaicli-0.1.0.dist-info/entry_points.txt +3 -0
- yaicli-0.0.19.dist-info/RECORD +0 -7
- yaicli-0.0.19.dist-info/entry_points.txt +0 -2
- yaicli.py +0 -667
- {yaicli-0.0.19.dist-info → yaicli-0.1.0.dist-info}/WHEEL +0 -0
- {yaicli-0.0.19.dist-info → yaicli-0.1.0.dist-info}/licenses/LICENSE +0 -0
yaicli/entry.py
ADDED
@@ -0,0 +1,95 @@
import sys
from typing import Annotated, Optional

import typer

from yaicli.cli import CLI
from yaicli.const import DEFAULT_CONFIG_INI

app = typer.Typer(
    name="yaicli",
    help="YAICLI - Yet Another AI CLI Interface.",
    context_settings={"help_option_names": ["-h", "--help"]},
    pretty_exceptions_enable=False,  # Let the CLI handle errors gracefully
    add_completion=False,  # Disable default shell completion for now
)


@app.command()
def main(
    ctx: typer.Context,
    prompt: Annotated[
        Optional[str], typer.Argument(help="The prompt to send to the LLM. Reads from stdin if available.")
    ] = None,
    chat: Annotated[
        bool, typer.Option("--chat", "-c", help="Start in interactive chat mode.", rich_help_panel="Mode Options")
    ] = False,
    shell: Annotated[
        bool,
        typer.Option(
            "--shell",
            "-s",
            help="Generate and optionally execute a shell command (non-interactive).",
            rich_help_panel="Mode Options",
        ),
    ] = False,
    verbose: Annotated[
        bool,
        typer.Option(
            "--verbose", "-V", help="Show verbose output (e.g., loaded config).", rich_help_panel="Other Options"
        ),
    ] = False,
    template: Annotated[
        bool,
        typer.Option(
            "--template", help="Show the default config file template and exit.", rich_help_panel="Other Options"
        ),
    ] = False,
):  # Removed trailing comma
    """YAICLI: Your AI assistant in the command line.

    Call with a PROMPT to get a direct answer, use --shell to execute as command,
    or use --chat for an interactive session.
    """
    if template:
        print(DEFAULT_CONFIG_INI)
        raise typer.Exit()

    # Combine prompt argument with stdin content if available
    final_prompt = prompt
    if not sys.stdin.isatty():
        stdin_content = sys.stdin.read().strip()
        if stdin_content:
            if final_prompt:
                # Prepend stdin content to the argument prompt
                final_prompt = f"{stdin_content}\n\n{final_prompt}"
            else:
                final_prompt = stdin_content

    # Basic validation for conflicting options or missing prompt
    if not final_prompt and not chat:
        # If no prompt and not starting chat, show help
        typer.echo(ctx.get_help())
        raise typer.Exit()
    if chat and final_prompt:
        # Warn if both chat mode and a prompt are given (prompt will be ignored)
        # Or, could use the prompt as the first message in chat mode
        print("Warning: Starting in chat mode. Initial prompt argument will be ignored.")

    try:
        # Instantiate the main CLI class
        cli_instance = CLI(verbose=verbose)
        # Run the appropriate mode
        cli_instance.run(chat=chat, shell=shell, prompt=final_prompt)
    except Exception as e:
        # Catch potential errors during CLI initialization or run
        print(f"An error occurred: {e}")
        if verbose:
            import traceback

            traceback.print_exc()
        raise typer.Exit(code=1)


if __name__ == "__main__":
    app()
yaicli/history.py
ADDED
@@ -0,0 +1,72 @@
from os.path import exists

from prompt_toolkit.history import FileHistory, _StrOrBytesPath


class LimitedFileHistory(FileHistory):
    """Limited file history.

    This class extends the FileHistory class from prompt_toolkit.history.
    It adds a limit to the number of entries in the history file.
    """

    def __init__(self, filename: _StrOrBytesPath, max_entries: int = 500, trim_every: int = 2):
        """Initialize the LimitedFileHistory object.

        Args:
            filename: Path to the history file
            max_entries: Maximum number of entries to keep
            trim_every: Trim history every `trim_every` appends

        Examples:
            >>> history = LimitedFileHistory("~/.yaicli_history", max_entries=500, trim_every=10)
            >>> history.append_string("echo hello")
            >>> history.append_string("echo world")
            >>> session = PromptSession(history=history)
        """
        self.max_entries = max_entries
        self._append_count = 0
        self._trim_every = trim_every
        super().__init__(filename)

    def store_string(self, string: str) -> None:
        """Store a string in the history file.

        Calls the original method to append the new record, then trims the
        file every `trim_every` appends.
        """
        super().store_string(string)

        self._append_count += 1
        if self._append_count >= self._trim_every:
            self._trim_history()
            self._append_count = 0

    def _trim_history(self):
        """Trim the history file to the specified maximum number of entries."""
        if not exists(self.filename):
            return

        with open(self.filename, "r", encoding="utf-8") as f:
            lines = f.readlines()

        # Group lines into records: each record starts with a "# timestamp" line
        # followed by one or more "+..." lines.
        entries = []
        current_entry = []

        for line in lines:
            if line.startswith("# "):
                if current_entry:
                    entries.append(current_entry)
                current_entry = [line]
            elif line.startswith("+") or line.strip() == "":
                current_entry.append(line)

        if current_entry:
            entries.append(current_entry)

        # Keep only the most recent max_entries records (later records are newer)
        trimmed_entries = entries[-self.max_entries :]

        with open(self.filename, "w", encoding="utf-8") as f:
            for entry in trimmed_entries:
                f.writelines(entry)
yaicli/printer.py
ADDED
@@ -0,0 +1,244 @@
import itertools
import time
import traceback
from typing import (
    Any,
    Dict,
    Iterator,
    Optional,
    Tuple,
)

from rich import get_console
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

from yaicli.const import DEFAULT_CODE_THEME, EventTypeEnum


class Printer:
    """Handles printing responses to the console, including stream processing."""

    _REASONING_PREFIX = "> "
    _CURSOR_ANIMATION_SLEEP = 0.005

    def __init__(self, config: Dict[str, Any], console: Console, verbose: bool = False):
        self.config = config
        self.console = console or get_console()
        self.verbose = verbose
        self.code_theme = config.get("CODE_THEME", DEFAULT_CODE_THEME)
        self.in_reasoning: bool = False

    def _reset_state(self) -> None:
        """Resets the printer state for a new stream."""
        self.in_reasoning = False

    def _process_reasoning_chunk(self, chunk: str, content: str, reasoning: str) -> Tuple[str, str]:
        """Adds a reasoning chunk to the reasoning text.

        This method handles the processing of reasoning chunks, and updates the
        reasoning state when the </think> tag is closed.

        Args:
            chunk (str): The reasoning chunk to process.
            content (str): The current content text.
            reasoning (str): The current reasoning text.

        Returns:
            Tuple[str, str]: The updated content text and reasoning text.
        """
        if not self.in_reasoning:
            self.in_reasoning = True
            reasoning = ""

        tmp = chunk.replace("\n", f"\n{self._REASONING_PREFIX}")
        tmp_reasoning = reasoning + tmp

        reasoning += chunk
        if "</think>" in tmp_reasoning:
            self.in_reasoning = False
            reasoning, content = reasoning.split("</think>", maxsplit=1)
        return content, reasoning

    def _process_content_chunk(self, chunk: str, content: str, reasoning: str) -> Tuple[str, str]:
        """Adds a content chunk to the content text.

        This method handles the processing of content chunks, and updates the
        reasoning state when a <think> tag is opened.

        Args:
            chunk (str): The content chunk to process.
            content (str): The current content text.
            reasoning (str): The current reasoning text.

        Returns:
            Tuple[str, str]: The updated content text and reasoning text.
        """
        if content == "":
            chunk = chunk.lstrip()  # Remove leading whitespace from first chunk

        if self.in_reasoning:
            self.in_reasoning = False
        content += chunk

        if content.startswith("<think>"):
            # Remove <think> tag and leading whitespace
            self.in_reasoning = True
            reasoning = content[7:].lstrip()
            content = ""  # Content starts after the initial <think> tag

        return content, reasoning

    def _handle_event(self, event: Dict[str, Any], content: str, reasoning: str) -> Tuple[str, str]:
        """Process a single stream event and return the updated content and reasoning.

        Args:
            event (Dict[str, Any]): The stream event to process.
            content (str): The current content text (non-reasoning).
            reasoning (str): The current reasoning text.

        Returns:
            Tuple[str, str]: The updated content text and reasoning text.
        """
        event_type = event.get("type")
        chunk = event.get("chunk")

        if event_type == EventTypeEnum.ERROR and self.verbose:
            self.console.print(f"Stream error: {event.get('message')}", style="dim")
            return content, reasoning

        # Handle explicit reasoning end event
        if event_type == EventTypeEnum.REASONING_END:
            if self.in_reasoning:
                self.in_reasoning = False
            return content, reasoning

        if event_type in (EventTypeEnum.REASONING, EventTypeEnum.CONTENT) and chunk:
            if event_type == EventTypeEnum.REASONING or self.in_reasoning:
                return self._process_reasoning_chunk(str(chunk), content, reasoning)
            return self._process_content_chunk(str(chunk), content, reasoning)

        return content, reasoning

    def _format_display_text(self, content: str, reasoning: str) -> str:
        """Format the text for display, combining content and reasoning if needed.

        Args:
            content (str): The content text.
            reasoning (str): The reasoning text.

        Returns:
            str: The formatted text for display.
        """
        display_text = ""

        # Add reasoning with proper formatting if it exists
        if reasoning:
            formatted_reasoning = reasoning.replace("\n", f"\n{self._REASONING_PREFIX}")
            if not formatted_reasoning.startswith(self._REASONING_PREFIX):
                formatted_reasoning = self._REASONING_PREFIX + formatted_reasoning
            # Reasoning prefix is now added per line
            display_text += f"\nThinking:\n{formatted_reasoning}"

            # Only add newlines if there is content following the reasoning
            if content:
                display_text += "\n\n"

        # Add content
        display_text += content

        return display_text

    def _update_live_display(self, live: Live, content: str, reasoning: str, cursor: Iterator[str]) -> None:
        """Update live display content and execute the cursor animation.

        Sleeps for a short duration to control the cursor animation speed.

        Args:
            live (Live): The live display object.
            content (str): The current content text.
            reasoning (str): The current reasoning text.
            cursor (Iterator[str]): The cursor animation iterator.
        """
        # Format display text without cursor first
        display_text = self._format_display_text(content, reasoning)
        cursor_char = next(cursor)

        # Add cursor at the end of reasoning or content depending on current state
        if self.in_reasoning:
            # Add cursor at the end of reasoning content
            if reasoning:
                # Append cursor directly if reasoning ends without newline, otherwise append after prefix
                if reasoning.endswith("\n"):
                    display_text += f"\n{self._REASONING_PREFIX}{cursor_char}"
                else:
                    display_text += cursor_char
            else:
                # If reasoning just started and no content yet
                # Updated to match formatting
                display_text = f"\nThinking:\n{self._REASONING_PREFIX}{cursor_char}"
        else:
            # Add cursor at the end of normal content
            display_text += cursor_char

        markdown = Markdown(display_text, code_theme=self.code_theme)
        live.update(markdown)
        time.sleep(self._CURSOR_ANIMATION_SLEEP)

    def display_stream(self, stream_iterator: Iterator[Dict[str, Any]]) -> Tuple[Optional[str], Optional[str]]:
        """Display streaming response content.

        Handles stream events and updates the live display accordingly.
        This method separates content and reasoning blocks for display and further processing.

        Args:
            stream_iterator (Iterator[Dict[str, Any]]): The stream iterator to process.

        Returns:
            Tuple[Optional[str], Optional[str]]: The final content and reasoning texts if successful, None otherwise.
        """
        self.console.print("Assistant:", style="bold green")
        self._reset_state()  # Reset state for the new stream
        content = ""
        reasoning = ""
        # Removed in_reasoning local variable initialization
        cursor = itertools.cycle(["_", " "])

        with Live(console=self.console) as live:
            try:
                for event in stream_iterator:
                    # Pass only content and reasoning, rely on self.in_reasoning
                    content, reasoning = self._handle_event(event, content, reasoning)

                    if event.get("type") in (
                        EventTypeEnum.CONTENT,
                        EventTypeEnum.REASONING,
                        EventTypeEnum.REASONING_END,
                    ):
                        # Pass only necessary variables, rely on self.in_reasoning
                        self._update_live_display(live, content, reasoning, cursor)

                # Remove cursor and finalize display
                display_text = self._format_display_text(content, reasoning)
                live.update(Markdown(display_text, code_theme=self.code_theme))
                return content, reasoning

            except Exception as e:
                self.console.print(f"An error occurred during stream display: {e}", style="red")
                display_text = self._format_display_text(content, reasoning) + " [Display Error]"
                live.update(Markdown(markup=display_text, code_theme=self.code_theme))
                if self.verbose:
                    traceback.print_exc()
                return None, None

    def display_normal(self, content: Optional[str], reasoning: Optional[str] = None) -> None:
        """Display a complete, non-streamed response.

        Args:
            content (Optional[str]): The main content to display.
            reasoning (Optional[str]): The reasoning content to display.
        """
        self.console.print("Assistant:", style="bold green")

        if content or reasoning:
            # Use the existing _format_display_text method
            display_text = self._format_display_text(content or "", reasoning or "")
            self.console.print(Markdown(display_text, code_theme=self.code_theme))
            self.console.print()  # Add a newline for spacing
        else:
            self.console.print("Assistant did not provide any content.", style="yellow")
yaicli/utils.py
ADDED
@@ -0,0 +1,112 @@
import platform
from os import getenv
from os.path import basename, pathsep
from typing import Any, Optional, TypeVar

from distro import name as distro_name

from yaicli.const import DEFAULT_OS_NAME, DEFAULT_SHELL_NAME

T = TypeVar("T", int, float, str)


def detect_os(config: dict[str, Any]) -> str:
    """Detect operating system + version based on config or system info."""
    os_name_config = config.get("OS_NAME", DEFAULT_OS_NAME)
    if os_name_config != DEFAULT_OS_NAME:
        return os_name_config

    current_platform = platform.system()
    if current_platform == "Linux":
        return "Linux/" + distro_name(pretty=True)
    if current_platform == "Windows":
        return "Windows " + platform.release()
    if current_platform == "Darwin":
        return "Darwin/MacOS " + platform.mac_ver()[0]
    return current_platform


def detect_shell(config: dict[str, Any]) -> str:
    """Detect shell name based on config or environment."""
    shell_name_config = config.get("SHELL_NAME", DEFAULT_SHELL_NAME)
    if shell_name_config != DEFAULT_SHELL_NAME:
        return shell_name_config

    current_platform = platform.system()
    if current_platform in ("Windows", "nt"):
        # Basic check for PowerShell based on environment variables
        is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
        return "powershell.exe" if is_powershell else "cmd.exe"

    # For Linux/MacOS, check SHELL environment variable
    return basename(getenv("SHELL") or "/bin/sh")


def filter_command(command: str) -> Optional[str]:
    """Filter out unwanted characters from a command.

    The LLM may return commands in markdown format with code blocks.
    This method removes markdown formatting from the command.
    It handles various formats including:
    - Commands surrounded by ``` (plain code blocks)
    - Commands with language specifiers like ```bash, ```zsh, etc.
    - Commands with specific examples like ```ls -al```

    Examples:
        ```bash\nls -la\n``` ==> ls -la
        ```zsh\nls -la\n``` ==> ls -la
        ```ls -la``` ==> ls -la
        ls -la ==> ls -la
        ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
        ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
        ```plaintext\nls -la\n``` ==> ls -la
    """
    if not command or not command.strip():
        return ""

    # Handle commands that are already without code blocks
    if "```" not in command:
        return command.strip()

    # Handle code blocks with or without language specifiers
    lines = command.strip().split("\n")

    # Check if it's a single-line code block like ```ls -al```
    if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
        return lines[0][3:-3].strip()

    # Handle multi-line code blocks
    if lines[0].startswith("```"):
        # Remove the opening ``` line (with or without language specifier)
        content_lines = lines[1:]

        # If the last line is a closing ```, remove it
        if content_lines and content_lines[-1].strip() == "```":
            content_lines = content_lines[:-1]

        # Join the remaining lines and strip any extra whitespace
        return "\n".join(line.strip() for line in content_lines if line.strip())
    else:
        # If the first line doesn't start with ```, return the entire command without the ``` characters
        return command.strip().replace("```", "")


def str2bool(value: str) -> bool:
    """Convert a string representation of truth to True or False.

    True values are 'y', 'yes', 't', 'true', 'on', and '1';
    false values are 'n', 'no', 'f', 'false', 'off', and '0'.
    Raises ValueError if 'value' is anything else.
    """
    if value in {False, True}:
        return bool(value)

    norm = value.strip().lower()

    if norm in {"1", "true", "t", "yes", "y", "on"}:
        return True

    if norm in {"0", "false", "f", "no", "n", "off"}:
        return False

    # Handle empty strings and other invalid values
    raise ValueError(f"Invalid boolean value: {value}")