ngpt 3.5.6__py3-none-any.whl → 3.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngpt/cli/modes/shell.py +519 -72
- ngpt/cli/ui.py +16 -11
- {ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/METADATA +1 -1
- {ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/RECORD +7 -7
- {ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/WHEEL +0 -0
- {ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/entry_points.txt +0 -0
- {ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/licenses/LICENSE +0 -0
ngpt/cli/modes/shell.py
CHANGED
@@ -1,11 +1,15 @@
 from ..formatters import COLORS
-from ..ui import spinner
+from ..ui import spinner, copy_to_clipboard
+from ..renderers import prettify_markdown, has_markdown_renderer, prettify_streaming_markdown, show_available_renderers
 from ...utils import enhance_prompt_with_web_search
 import subprocess
 import sys
 import threading
 import platform
 import os
+import shutil
+import re
+import time
 
 # System prompt for shell command generation
 SHELL_SYSTEM_PROMPT = """Your role: Provide only plain text without Markdown formatting. Do not show any warnings or information regarding your capabilities. Do not provide any description. If you need to store any data, assume it will be stored in the chat. Provide only {shell_name} command for {operating_system} without any description. If there is a lack of details, provide most logical solution. Ensure the output is a valid shell command. If multiple steps required try to combine them together.
@@ -34,6 +38,345 @@ Your role: Provide only plain text without Markdown formatting. Do not show any
 
 Command:"""
 
+def detect_shell():
+    """Detect the current shell type and OS more accurately.
+
+    Returns:
+        tuple: (shell_name, highlight_language, operating_system) - the detected shell name,
+               appropriate syntax highlighting language, and operating system
+    """
+    os_type = platform.system()
+
+    # Determine OS with detailed information
+    if os_type == "Darwin":
+        operating_system = "MacOS"
+    elif os_type == "Linux":
+        # Try to get Linux distribution name
+        try:
+            result = subprocess.run(["lsb_release", "-si"], capture_output=True, text=True)
+            distro = result.stdout.strip()
+            operating_system = f"Linux/{distro}" if distro else "Linux"
+        except:
+            operating_system = "Linux"
+    elif os_type == "Windows":
+        operating_system = "Windows"
+    else:
+        operating_system = os_type
+
+    # Handle WSL specially - it looks like Linux but runs on Windows
+    is_wsl = False
+    try:
+        with open('/proc/version', 'r') as f:
+            if 'microsoft' in f.read().lower():
+                is_wsl = True
+                operating_system = "Windows/WSL"
+    except:
+        pass
+
+    # Try to detect the shell by examining environment variables
+    try:
+        # Check for specific shell by examining SHELL_NAME or equivalent
+        if os_type == "Windows" and not is_wsl:
+            # Check for Git Bash or MSYS2/Cygwin
+            if "MINGW" in os.environ.get("MSYSTEM", "") or "MSYS" in os.environ.get("MSYSTEM", ""):
+                return "bash", "bash", operating_system
+
+            # Check if we're in Git Bash by examining PATH for /mingw/
+            if any("/mingw/" in path.lower() for path in os.environ.get("PATH", "").split(os.pathsep)):
+                return "bash", "bash", operating_system
+
+            # Check for WSL within Windows
+            if "WSL" in os.environ.get("PATH", "") or "Microsoft" in os.environ.get("PATH", ""):
+                return "bash", "bash", operating_system
+
+            # Check for explicit shell path in environment
+            if os.environ.get("SHELL"):
+                shell_path = os.environ.get("SHELL").lower()
+                if "bash" in shell_path:
+                    return "bash", "bash", operating_system
+                elif "zsh" in shell_path:
+                    return "zsh", "zsh", operating_system
+                elif "powershell" in shell_path:
+                    return "powershell.exe", "powershell", operating_system
+                elif "cmd" in shell_path:
+                    return "cmd.exe", "batch", operating_system
+
+            # Check for PowerShell vs CMD
+            if os.environ.get("PSModulePath"):
+                # Further distinguish PowerShell vs PowerShell Core
+                if "pwsh" in os.environ.get("PSModulePath", "").lower():
+                    return "pwsh", "powershell", operating_system
+                else:
+                    return "powershell.exe", "powershell", operating_system
+            else:
+                return "cmd.exe", "batch", operating_system
+        else:
+            # Unix-like systems - try to get more specific
+            shell_path = os.environ.get("SHELL", "/bin/bash")
+            shell_name = os.path.basename(shell_path)
+
+            # Map shell name to syntax highlight language
+            if shell_name == "zsh":
+                return shell_name, "zsh", operating_system
+            elif shell_name == "fish":
+                return shell_name, "fish", operating_system
+            elif "csh" in shell_name:
+                return shell_name, "csh", operating_system
+            else:
+                # Default to bash for sh, bash, and other shells
+                return shell_name, "bash", operating_system
+    except Exception as e:
+        # Fall back to simple detection if anything fails
+        if os_type == "Windows":
+            return "powershell.exe", "powershell", operating_system
+        else:
+            return "bash", "bash", operating_system
+
+def setup_streaming(args, logger=None):
+    """Set up streaming configuration based on command-line arguments.
+
+    Args:
+        args: The parsed command-line arguments
+        logger: Optional logger instance for logging
+
+    Returns:
+        tuple: (should_stream, use_stream_prettify, use_regular_prettify,
+               stream_setup) - Configuration settings and streaming components
+    """
+    # Default values - initialize all at once
+    stream_callback = live_display = stop_spinner_func = None
+    stop_spinner = spinner_thread = stop_spinner_event = None
+    should_stream = True  # Default to streaming
+    use_stream_prettify = use_regular_prettify = False
+    first_content_received = False
+
+    # Determine final behavior based on flag priority
+    if args.stream_prettify:
+        # Highest priority: stream-prettify
+        if has_markdown_renderer('rich'):
+            should_stream = True
+            use_stream_prettify = True
+            live_display, stream_callback, setup_spinner = prettify_streaming_markdown(args.renderer)
+            if not live_display:
+                # Fallback if live display fails
+                use_stream_prettify = False
+                use_regular_prettify = True
+                should_stream = False
+                print(f"{COLORS['yellow']}Live display setup failed. Falling back to regular prettify mode.{COLORS['reset']}")
+        else:
+            # Rich not available for stream-prettify
+            print(f"{COLORS['yellow']}Warning: Rich is not available for --stream-prettify. Install with: pip install \"ngpt[full]\".{COLORS['reset']}")
+            print(f"{COLORS['yellow']}Falling back to default streaming without prettify.{COLORS['reset']}")
+            should_stream = True
+            use_stream_prettify = False
+    elif args.no_stream:
+        # Second priority: no-stream
+        should_stream = False
+        use_regular_prettify = False  # No prettify if no streaming
+    elif args.prettify:
+        # Third priority: prettify (requires disabling stream)
+        if has_markdown_renderer(args.renderer):
+            should_stream = False
+            use_regular_prettify = True
+            print(f"{COLORS['yellow']}Note: Using standard markdown rendering (--prettify). For streaming markdown rendering, use --stream-prettify instead.{COLORS['reset']}")
+        else:
+            # Renderer not available for prettify
+            print(f"{COLORS['yellow']}Warning: Renderer '{args.renderer}' not available for --prettify.{COLORS['reset']}")
+            show_available_renderers()
+            print(f"{COLORS['yellow']}Falling back to default streaming without prettify.{COLORS['reset']}")
+            should_stream = True
+            use_regular_prettify = False
+
+    # Create a wrapper for the stream callback that will stop the spinner on first content
+    if stream_callback:
+        original_callback = stream_callback
+
+        def spinner_handling_callback(content, **kwargs):
+            nonlocal first_content_received
+
+            # On first content, stop the spinner
+            if not first_content_received and stop_spinner_func:
+                first_content_received = True
+                # Stop the spinner
+                stop_spinner_func()
+                # Ensure spinner message is cleared with an extra blank line
+                sys.stdout.write("\r" + " " * 100 + "\r")
+                sys.stdout.flush()
+
+            # Call the original callback to update the display
+            if original_callback:
+                original_callback(content, **kwargs)
+
+        # Use our wrapper callback
+        if use_stream_prettify and live_display:
+            stream_callback = spinner_handling_callback
+
+            # Set up the spinner if we have a live display
+            stop_spinner_event = threading.Event()
+            stop_spinner_func = setup_spinner(stop_spinner_event, color=COLORS['cyan'])
+
+    # Create spinner for non-stream-prettify modes EXCEPT no-stream
+    if not use_stream_prettify and not args.no_stream:
+        # Prepare spinner (but don't start it yet - will be started in generate_with_model)
+        stop_spinner = threading.Event()
+        spinner_thread = threading.Thread(
+            target=spinner,
+            args=("Generating...",),
+            kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']}
+        )
+        spinner_thread.daemon = True
+
+    # Create a stream_setup dict to hold all the variables - use a dict comprehension
+    stream_setup = {
+        'stream_callback': stream_callback,
+        'live_display': live_display,
+        'stop_spinner_func': stop_spinner_func,
+        'stop_spinner': stop_spinner,
+        'spinner_thread': spinner_thread,
+        'stop_spinner_event': stop_spinner_event,
+        'first_content_received': first_content_received
+    }
+
+    return (should_stream, use_stream_prettify, use_regular_prettify, stream_setup)
+
+def generate_with_model(client, prompt, messages, args, stream_setup,
+                       use_stream_prettify, should_stream, spinner_message="Generating...",
+                       temp_override=None, logger=None):
+    """Generate content using the model with proper streaming and spinner handling.
+
+    Args:
+        client: The NGPTClient instance
+        prompt: The prompt to send to the model
+        messages: The formatted messages to send
+        args: The parsed command-line arguments
+        stream_setup: The streaming setup from setup_streaming
+        use_stream_prettify: Whether to use stream prettify
+        should_stream: Whether to stream the response
+        spinner_message: Message to show in the spinner
+        temp_override: Optional temperature override
+        logger: Optional logger instance
+
+    Returns:
+        str: The generated content
+    """
+    # Extract variables from stream_setup - only unpack what we need
+    stream_callback = stream_setup['stream_callback']
+    stop_spinner = stream_setup['stop_spinner']
+    spinner_thread = stream_setup['spinner_thread']
+    stop_spinner_event = stream_setup['stop_spinner_event']
+    stop_spinner_func = stream_setup['stop_spinner_func']
+
+    # Show spinner for all modes except no-stream
+    if not args.no_stream:
+        # Two possible spinner types:
+        # 1. Rich spinner for stream_prettify
+        # 2. Regular spinner for all other modes (including --prettify)
+
+        if use_stream_prettify and stop_spinner_func:
+            # Rich spinner is handled by callbacks
+            pass
+        elif spinner_thread and stop_spinner:
+            # Start the regular spinner thread
+            spinner_thread._args = (spinner_message,)
+            if not spinner_thread.is_alive():
+                spinner_thread.start()
+    else:
+        # No-stream mode just gets a status message
+        print(spinner_message)
+
+    # Set temperature
+    temp = args.temperature if temp_override is None else temp_override
+
+    try:
+        # Make the API call
+        return client.chat(
+            prompt=prompt,
+            stream=should_stream,
+            messages=messages,
+            temperature=temp,
+            top_p=args.top_p,
+            max_tokens=args.max_tokens,
+            stream_callback=stream_callback
+        )
+    except KeyboardInterrupt:
+        print("\nRequest cancelled by user.")
+        return ""
+    except Exception as e:
+        print(f"Error generating content: {e}")
+        return ""
+    finally:
+        # Stop the spinner
+        if use_stream_prettify and stop_spinner_event:
+            # Stop rich spinner
+            if not stream_setup['first_content_received']:
+                stop_spinner_event.set()
+        elif stop_spinner:
+            # Stop regular spinner
+            stop_spinner.set()
+            if spinner_thread and spinner_thread.is_alive():
+                spinner_thread.join()
+
+            # Clear the spinner line completely
+            sys.stdout.write("\r" + " " * 100 + "\r")
+            sys.stdout.flush()
+
+def display_content(content, content_type, highlight_lang, args, use_stream_prettify, use_regular_prettify):
+    """Display generated content with appropriate formatting.
+
+    Args:
+        content: The content to display
+        content_type: Type of content ('command' or 'description')
+        highlight_lang: Language for syntax highlighting
+        args: The parsed command-line arguments
+        use_stream_prettify: Whether stream prettify is enabled
+        use_regular_prettify: Whether regular prettify is enabled
+    """
+    if not content:
+        return
+
+    # Define title based on content type - use a lookup instead of if-else
+    titles = {
+        'command': "Generated Command",
+        'description': "Command Description"
+    }
+    title = titles.get(content_type, "Generated Content")
+
+    # Format content appropriately - create formatted content only when needed
+    if use_regular_prettify and has_markdown_renderer(args.renderer):
+        if content_type == 'command':
+            formatted_content = f"### {title}\n\n```{highlight_lang}\n{content}\n```"
+        else:  # description
+            formatted_content = f"### {title}\n\n{content}"
+
+    # Only show formatted content if not already shown by stream-prettify
+    if not use_stream_prettify:
+        if use_regular_prettify and has_markdown_renderer(args.renderer):
+            # Use rich renderer for pretty output
+            prettify_markdown(formatted_content, args.renderer)
+        elif args.no_stream:
+            # Simple output for no-stream mode (no box)
+            if content_type == 'command':
+                print(f"\n{title}:\n{COLORS['green']}{content}{COLORS['reset']}\n")
+            else:
+                print(f"\n{title}:\n{content}\n")
+        else:
+            # Regular display or fallback
+            if content_type == 'command':
+                # Box formatting for commands in regular mode - calculate once
+                term_width = shutil.get_terminal_size().columns
+                box_width = min(term_width - 4, len(content) + 8)
+                horizontal_line = "─" * box_width
+                spacing = box_width - len(title) - 11
+                content_spacing = box_width - len(content) - 2
+
+                print(f"\n┌{horizontal_line}┐")
+                print(f"│ {COLORS['bold']}{title}:{COLORS['reset']} {' ' * spacing}│")
+                print(f"│ {COLORS['green']}{content}{COLORS['reset']}{' ' * content_spacing}│")
+                print(f"└{horizontal_line}┘\n")
+            else:
+                # Simple display for descriptions
+                print(f"\n{content}\n")
+
 def shell_mode(client, args, logger=None):
     """Handle the shell command generation mode.
 
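Taken together, these four new helpers split the previously monolithic shell flow into detection, stream setup, generation, and display. A minimal sketch of how they compose inside shell_mode, mirroring the calls made later in this diff (client, args, prompt, and messages are assumed to come from the surrounding code; this is illustrative, not verbatim package code):

    # Sketch only - composition of the new helpers as used by shell_mode
    shell_name, highlight_lang, operating_system = detect_shell()
    should_stream, use_stream_prettify, use_regular_prettify, stream_setup = setup_streaming(args)
    command = generate_with_model(
        client=client,
        prompt=prompt,
        messages=messages,
        args=args,
        stream_setup=stream_setup,
        use_stream_prettify=use_stream_prettify,
        should_stream=should_stream,
        spinner_message="Generating command...",
        logger=logger,
    )
    display_content(
        content=command,
        content_type='command',
        highlight_lang=highlight_lang,
        args=args,
        use_stream_prettify=use_stream_prettify,
        use_regular_prettify=use_regular_prettify,
    )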
@@ -42,6 +385,7 @@ def shell_mode(client, args, logger=None):
         args: The parsed command-line arguments
         logger: Optional logger instance
     """
+    # Get the user prompt more efficiently
     if args.prompt is None:
        try:
             print("Enter shell command description: ", end='')
@@ -56,11 +400,12 @@ def shell_mode(client, args, logger=None):
     if logger:
         logger.log("user", prompt)
 
-    # Enhance prompt with web search if enabled
+    # Enhance prompt with web search if enabled - reuse variables
     if args.web_search:
+        original_prompt = prompt
+        web_search_succeeded = False
+
         try:
-            original_prompt = prompt
-
             # Start spinner for web search
             stop_spinner = threading.Event()
             spinner_thread = threading.Thread(
@@ -73,50 +418,29 @@ def shell_mode(client, args, logger=None):
 
             try:
                 prompt = enhance_prompt_with_web_search(prompt, logger=logger, disable_citations=True)
-
+                web_search_succeeded = True
+            finally:
+                # Always stop the spinner
                 stop_spinner.set()
                 spinner_thread.join()
+
                 # Clear the spinner line completely
                 sys.stdout.write("\r" + " " * 100 + "\r")
                 sys.stdout.flush()
-                print("Enhanced input with web search results.")
-            except Exception as e:
-                # Stop the spinner before re-raising
-                stop_spinner.set()
-                spinner_thread.join()
-                raise e
 
-
-
-
-
+            if web_search_succeeded:
+                print("Enhanced input with web search results.")
+
+                # Log the enhanced prompt if logging is enabled
+                if logger:
+                    # Use "web_search" role instead of "system" for clearer logs
+                    logger.log("web_search", prompt.replace(original_prompt, "").strip())
         except Exception as e:
             print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
             # Continue with the original prompt if web search fails
 
-    #
-
-    if os_type == "Darwin":
-        operating_system = "MacOS"
-    elif os_type == "Linux":
-        # Try to get Linux distribution name
-        try:
-            result = subprocess.run(["lsb_release", "-si"], capture_output=True, text=True)
-            distro = result.stdout.strip()
-            operating_system = f"Linux/{distro}" if distro else "Linux"
-        except:
-            operating_system = "Linux"
-    elif os_type == "Windows":
-        operating_system = "Windows"
-    else:
-        operating_system = os_type
-
-    # Determine shell type
-    if os_type == "Windows":
-        shell_name = "powershell.exe" if os.environ.get("PSModulePath") else "cmd.exe"
-    else:
-        shell_name = os.environ.get("SHELL", "/bin/bash")
-        shell_name = os.path.basename(shell_name)
+    # Detect shell type, highlight language, and operating system
+    shell_name, highlight_lang, operating_system = detect_shell()
 
     # Format the system prompt based on whether preprompt is provided
     if args.preprompt:
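The hunk above replaces the duplicated spinner-stop code in the old except branch with a single finally block, so the spinner is stopped whether the web search succeeds, raises, or is interrupted. Reduced to its essentials, the pattern is roughly the following (a sketch under the same imports; the spinner message string is illustrative):

    stop_spinner = threading.Event()
    spinner_thread = threading.Thread(
        target=spinner,
        args=("Searching the web...",),  # illustrative message
        kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']},
    )
    spinner_thread.daemon = True
    spinner_thread.start()
    try:
        prompt = enhance_prompt_with_web_search(prompt, logger=logger, disable_citations=True)
        web_search_succeeded = True
    finally:
        # Runs on success, error, or Ctrl-C
        stop_spinner.set()
        spinner_thread.join()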
@@ -148,36 +472,21 @@ def shell_mode(client, args, logger=None):
     if logger:
         logger.log("system", system_prompt)
 
-    #
-
-    spinner_thread = threading.Thread(
-        target=spinner,
-        args=("Generating command...",),
-        kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']}
-    )
-    spinner_thread.daemon = True
-    spinner_thread.start()
+    # Set up streaming once and reuse for both command and description
+    should_stream, use_stream_prettify, use_regular_prettify, stream_setup = setup_streaming(args)
 
-
-
-
-
-
-
-
-
-
-
-
-
-    finally:
-        # Stop the spinner
-        stop_spinner.set()
-        spinner_thread.join()
-
-        # Clear the spinner line completely
-        sys.stdout.write("\r" + " " * 100 + "\r")
-        sys.stdout.flush()
+    # Generate the command
+    command = generate_with_model(
+        client=client,
+        prompt=prompt,
+        messages=messages,
+        args=args,
+        stream_setup=stream_setup,
+        use_stream_prettify=use_stream_prettify,
+        should_stream=should_stream,
+        spinner_message="Generating command...",
+        logger=logger
+    )
 
     if not command:
         return  # Error already printed by client
@@ -185,17 +494,57 @@ def shell_mode(client, args, logger=None):
     # Log the generated command if logging is enabled
     if logger:
         logger.log("assistant", command)
-
-
+
+    # Get the most up-to-date shell type at command generation time
+    _, highlight_lang, _ = detect_shell()
+
+    # Format with proper syntax highlighting for streaming prettify - only if needed
+    if use_stream_prettify and stream_setup['stream_callback'] and command:
+        # Create properly formatted markdown for streaming display
+        formatted_command = f"```{highlight_lang}\n{command}\n```"
+        # Update the live display with the formatted command
+        stream_setup['stream_callback'](formatted_command, complete=True)
+
+    # Display the command
+    display_content(
+        content=command,
+        content_type='command',
+        highlight_lang=highlight_lang,
+        args=args,
+        use_stream_prettify=use_stream_prettify,
+        use_regular_prettify=use_regular_prettify
+    )
+
+    # Display options with better formatting - prepare strings once
+    options_text = f"{COLORS['bold']}Options:{COLORS['reset']}"
+    options = [
+        f" {COLORS['cyan']}C{COLORS['reset']} - Copy - Copy the command to clipboard",
+        f" {COLORS['cyan']}E{COLORS['reset']} - Execute - Run the command in your shell",
+        f" {COLORS['cyan']}D{COLORS['reset']} - Describe - Explain what this command does",
+        f" {COLORS['cyan']}A{COLORS['reset']} - Abort - Cancel and return to prompt"
+    ]
+    prompt_text = f"\nWhat would you like to do? [{COLORS['cyan']}C{COLORS['reset']}/{COLORS['cyan']}E{COLORS['reset']}/{COLORS['cyan']}D{COLORS['reset']}/{COLORS['cyan']}A{COLORS['reset']}] "
+
+    # Print options with proper flushing to ensure display
+    print(options_text, flush=True)
+    for option in options:
+        print(option, flush=True)
+
+    # Add a small delay to ensure terminal rendering is complete,
+    # especially important for stream-prettify mode
+    if use_stream_prettify:
+        time.sleep(0.2)
+
+    # Print prompt and flush to ensure it appears
+    print(prompt_text, end='', flush=True)
 
     try:
-        print("Do you want to execute this command? [y/N] ", end='')
         response = input().lower()
     except KeyboardInterrupt:
         print("\nCommand execution cancelled by user.")
         return
 
-    if response == '
+    if response == 'e':
         # Log the execution if logging is enabled
         if logger:
             logger.log("system", f"Executing command: {command}")
@@ -224,4 +573,102 @@ def shell_mode(client, args, logger=None):
         if logger:
             logger.log("system", f"Command error: {error}")
 
-            print(f"\nError:\n{error}")
+            print(f"\nError:\n{error}")
+    elif response == 'c':
+        # Copy command to clipboard without confirmation prompt
+        copied = copy_to_clipboard(command, skip_confirmation=True)
+        if not copied:
+            print(f"{COLORS['yellow']}Failed to copy to clipboard. Command: {COLORS['green']}{command}{COLORS['reset']}")
+
+        # Log the copy if logging is enabled
+        if logger:
+            logger.log("system", "Command copied to clipboard")
+    elif response == 'd':
+        # Ask LLM to describe what the command does
+        describe_prompt = f"Please explain this command: {command}"
+
+        # Create system prompt for description that includes OS and shell info
+        describe_system_prompt = f"You are a helpful assistant explaining shell commands. The user is running {shell_name} on {operating_system}. Explain what the following shell command does in detail, considering this specific environment. Include any potential risks, side effects, or compatibility issues with this OS/shell combination."
+
+        # Prepare messages for the chat API
+        describe_messages = [
+            {"role": "system", "content": describe_system_prompt},
+            {"role": "user", "content": describe_prompt}
+        ]
+
+        # Log the system prompt if logging is enabled
+        if logger:
+            logger.log("system", f"Command description requested for {operating_system}/{shell_name}")
+
+        # Set up fresh streaming for description - reuse existing setup when possible
+        # We only need to refresh the streaming setup if we're using stream_prettify
+        if use_stream_prettify:
+            _, use_stream_prettify_desc, use_regular_prettify_desc, stream_setup_desc = setup_streaming(args)
+        else:
+            # Reuse the existing setup for non-prettify streaming
+            use_stream_prettify_desc = use_stream_prettify
+            use_regular_prettify_desc = use_regular_prettify
+
+            # Always create a fresh spinner for description
+            stop_spinner = threading.Event()
+            spinner_thread = threading.Thread(
+                target=spinner,
+                args=("Generating command description...",),
+                kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']}
+            )
+            spinner_thread.daemon = True
+
+            # Create a new stream setup with the fresh spinner
+            stream_setup_desc = {
+                'stream_callback': stream_setup.get('stream_callback'),
+                'live_display': stream_setup.get('live_display'),
+                'stop_spinner_func': stream_setup.get('stop_spinner_func'),
+                'stop_spinner': stop_spinner,
+                'spinner_thread': spinner_thread,
+                'stop_spinner_event': stream_setup.get('stop_spinner_event'),
+                'first_content_received': False
+            }
+
+        # Generate the description
+        description = generate_with_model(
+            client=client,
+            prompt=describe_prompt,
+            messages=describe_messages,
+            args=args,
+            stream_setup=stream_setup_desc,
+            use_stream_prettify=use_stream_prettify_desc,
+            should_stream=should_stream,
+            spinner_message="Generating command description...",
+            temp_override=0.3,
+            logger=logger
+        )
+
+        if not description:
+            return  # Error already printed
+
+        # Log the generated description if logging is enabled
+        if logger:
+            logger.log("assistant", description)
+
+        # Format with proper markdown for streaming prettify - only if needed
+        if use_stream_prettify_desc and stream_setup_desc['stream_callback'] and description:
+            # Format description as markdown for prettier display
+            md_description = f"### Command Description\n\n{description}"
+            # Update the live display with the formatted description
+            stream_setup_desc['stream_callback'](md_description, complete=True)
+
+        # Display the description
+        display_content(
+            content=description,
+            content_type='description',
+            highlight_lang=highlight_lang,
+            args=args,
+            use_stream_prettify=use_stream_prettify_desc,
+            use_regular_prettify=use_regular_prettify_desc
+        )
+    elif response == 'a' or response == '':
+        print("\nCommand aborted.")
+
+        # Log the abort if logging is enabled
+        if logger:
+            logger.log("system", "Command aborted by user")
ngpt/cli/ui.py
CHANGED
@@ -174,12 +174,13 @@ def get_terminal_input():
     except (IOError, OSError):
         return None
 
-def copy_to_clipboard(content, prompt_message=None):
+def copy_to_clipboard(content, prompt_message=None, skip_confirmation=False):
     """Copy content to clipboard with user confirmation.
 
     Args:
         content: The text content to copy to clipboard
         prompt_message: Optional custom message for the prompt (default: "Copy to clipboard? (y/n)")
+        skip_confirmation: When True, skips the confirmation prompt and copies directly
 
     Returns:
         bool: True if copied to clipboard successfully, False otherwise
@@ -189,17 +190,21 @@ def copy_to_clipboard(content, prompt_message=None):
         return False
 
     try:
-        #
-        if
-
+        # Skip confirmation if requested
+        if skip_confirmation:
+            answer = 'y'
+        else:
+            # Default prompt message
+            if prompt_message is None:
+                prompt_message = "Copy to clipboard? (y/n)"
+
+            # Make the prompt more visible with colors and formatting
+            clipboard_prompt = f"{COLORS['cyan']}{COLORS['bold']}{prompt_message}{COLORS['reset']} "
+            print(clipboard_prompt, end="")
+            sys.stdout.flush()
 
-
-
-        print(clipboard_prompt, end="")
-        sys.stdout.flush()
-
-        # Cross-platform terminal input
-        answer = get_terminal_input()
+            # Cross-platform terminal input
+            answer = get_terminal_input()
 
         if answer == 'y':
             try:
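The new skip_confirmation flag is what lets shell mode's Copy option copy immediately instead of prompting a second time. A minimal usage sketch (the command string is illustrative):

    from ngpt.cli.ui import copy_to_clipboard

    # Interactive: prints "Copy to clipboard? (y/n)" and waits for input
    copy_to_clipboard("ls -lah")

    # Non-interactive: copies straight away; returns False if no clipboard is available
    copied = copy_to_clipboard("ls -lah", skip_confirmation=True)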
{ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 3.5.6
+Version: 3.7.0
 Summary: Swiss army knife for LLMs: powerful CLI and interactive chatbot in one package. Seamlessly work with OpenAI, Ollama, Groq, Claude, Gemini, or any OpenAI-compatible API to generate code, craft git commits, rewrite text, and execute shell commands.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt
{ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/RECORD
CHANGED
@@ -7,22 +7,22 @@ ngpt/cli/config_manager.py,sha256=NQQcWnjUppAAd0s0p9YAf8EyKS1ex5-0EB4DvKdB4dk,36
 ngpt/cli/formatters.py,sha256=HBYGlx_7eoAKyzfy0Vq5L0yn8yVKjngqYBukMmXCcz0,9401
 ngpt/cli/main.py,sha256=oKX7ryTIrsvQRJHVnH2a763pGyNZthq81wkrRILwHLw,28932
 ngpt/cli/renderers.py,sha256=m71BeUXKynpKKGXFzwRSW1XngvyKiZ_xEsdujUbU0MA,16597
-ngpt/cli/ui.py,sha256=
+ngpt/cli/ui.py,sha256=tVJGTP1DWjCRq7ONFdOOKPHcVQz0MqiLyJtodKFabTk,9612
 ngpt/cli/modes/__init__.py,sha256=KP7VR6Xw9k1p5Jcu0F38RDxSFvFIzH3j1ThDLNwznUI,363
 ngpt/cli/modes/chat.py,sha256=jfKkrtSkx1gKPsKXDMxZ7BiJiMsCtFHyZCGIdmNQ0fk,7816
 ngpt/cli/modes/code.py,sha256=3avR9OM-D3r4HHfVm2bTfCOlsYQoqgtvU49zGzYfUqw,12513
 ngpt/cli/modes/gitcommsg.py,sha256=Alm1OLxXkuteiDSnDxjmnPvlSggGG2sTlUBAqJaYaN4,46739
 ngpt/cli/modes/interactive.py,sha256=TtBrZUX45CVfKOPvkb1ya7dIQhXLILtn7ajmfM9ohso,17419
 ngpt/cli/modes/rewrite.py,sha256=EKCPZwvu0MTDpD-nj_oty8vjVQpaF4ucwmTG99LJT6M,10736
-ngpt/cli/modes/shell.py,sha256=
+ngpt/cli/modes/shell.py,sha256=VT5Lc2pDOs7C7RY53mxg0soaxIfFYmdN7HNC_cEKJZ4,29335
 ngpt/cli/modes/text.py,sha256=7t5WWXMFxGkBM5HMP4irbN9aQwxE2YgywjiVPep710k,6417
 ngpt/utils/__init__.py,sha256=qu_66I1Vtav2f1LDiPn5J3DUsbK7o1CSScMcTkYqxoM,1179
 ngpt/utils/cli_config.py,sha256=Ug8cECBTIuzOwkBWidLTfs-OAdOsCMJ2bNa70pOADfw,11195
 ngpt/utils/config.py,sha256=wsArA4osnh8fKqOvtsPqqBxAz3DpdjtaWUFaRtnUdyc,10452
 ngpt/utils/log.py,sha256=f1jg2iFo35PAmsarH8FVL_62plq4VXH0Mu2QiP6RJGw,15934
 ngpt/utils/web_search.py,sha256=w5ke4KJMRxq7r5jtbUXvspja6XhjoPZloVkZ0IvBXIE,30731
-ngpt-3.
-ngpt-3.
-ngpt-3.
-ngpt-3.
-ngpt-3.
+ngpt-3.7.0.dist-info/METADATA,sha256=nnaAlOrIabLAvTq6nbbJ9F_-Y8JEzcWO4NCGlMYWYTE,23912
+ngpt-3.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ngpt-3.7.0.dist-info/entry_points.txt,sha256=SqAAvLhMrsEpkIr4YFRdUeyuXQ9o0IBCeYgE6AVojoI,44
+ngpt-3.7.0.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
+ngpt-3.7.0.dist-info/RECORD,,
{ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/WHEEL
File without changes

{ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/entry_points.txt
File without changes

{ngpt-3.5.6.dist-info → ngpt-3.7.0.dist-info}/licenses/LICENSE
File without changes