ngpt 2.9.2__py3-none-any.whl → 2.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngpt/cli/args.py +161 -0
- ngpt/cli/interactive.py +18 -33
- ngpt/cli/main.py +89 -130
- ngpt/cli/modes/chat.py +15 -1
- ngpt/cli/modes/code.py +11 -1
- ngpt/cli/modes/shell.py +33 -3
- ngpt/cli/modes/text.py +15 -1
- ngpt/cli_config.py +1 -1
- ngpt/log.py +180 -0
- {ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/METADATA +14 -9
- ngpt-2.11.0.dist-info/RECORD +25 -0
- ngpt-2.9.2.dist-info/RECORD +0 -23
- {ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/WHEEL +0 -0
- {ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/entry_points.txt +0 -0
- {ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/licenses/LICENSE +0 -0
ngpt/cli/args.py
ADDED
@@ -0,0 +1,161 @@
+import argparse
+import sys
+from .. import __version__
+from .formatters import COLORS, ColoredHelpFormatter
+from .renderers import has_markdown_renderer, warn_if_no_markdown_renderer
+
+def setup_argument_parser():
+    """Set up and return a fully configured argument parser for nGPT CLI."""
+    # Colorize description - use a shorter description to avoid line wrapping issues
+    description = f"{COLORS['cyan']}{COLORS['bold']}nGPT{COLORS['reset']} - Interact with AI language models via OpenAI-compatible APIs"
+
+    # Minimalist, clean epilog design
+    epilog = f"\n{COLORS['yellow']}nGPT {COLORS['bold']}v{__version__}{COLORS['reset']} • {COLORS['green']}Docs: {COLORS['bold']}https://nazdridoy.github.io/ngpt/usage/cli_usage.html{COLORS['reset']}"
+
+    parser = argparse.ArgumentParser(description=description, formatter_class=ColoredHelpFormatter, epilog=epilog)
+
+    # Add custom error method with color
+    original_error = parser.error
+    def error_with_color(message):
+        parser.print_usage(sys.stderr)
+        parser.exit(2, f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}{message}\n")
+    parser.error = error_with_color
+
+    # Custom version action with color
+    class ColoredVersionAction(argparse.Action):
+        def __call__(self, parser, namespace, values, option_string=None):
+            print(f"{COLORS['green']}{COLORS['bold']}nGPT{COLORS['reset']} version {COLORS['yellow']}{__version__}{COLORS['reset']}")
+            parser.exit()
+
+    # Version flag
+    parser.add_argument('-v', '--version', action=ColoredVersionAction, nargs=0, help='Show version information and exit')
+
+    # Config options
+    config_group = parser.add_argument_group('Configuration Options')
+    config_group.add_argument('--config', nargs='?', const=True, help='Path to a custom config file or, if no value provided, enter interactive configuration mode to create a new config')
+    config_group.add_argument('--config-index', type=int, default=0, help='Index of the configuration to use or edit (default: 0)')
+    config_group.add_argument('--provider', help='Provider name to identify the configuration to use')
+    config_group.add_argument('--remove', action='store_true', help='Remove the configuration at the specified index (requires --config and --config-index)')
+    config_group.add_argument('--show-config', action='store_true', help='Show the current configuration(s) and exit')
+    config_group.add_argument('--all', action='store_true', help='Show details for all configurations (requires --show-config)')
+    config_group.add_argument('--list-models', action='store_true', help='List all available models for the current configuration and exit')
+    config_group.add_argument('--list-renderers', action='store_true', help='Show available markdown renderers for use with --prettify')
+
+    # Global options
+    global_group = parser.add_argument_group('Global Options')
+    global_group.add_argument('--api-key', help='API key for the service')
+    global_group.add_argument('--base-url', help='Base URL for the API')
+    global_group.add_argument('--model', help='Model to use')
+    global_group.add_argument('--web-search', action='store_true',
+                      help='Enable web search capability (Note: Your API endpoint must support this feature)')
+    global_group.add_argument('-n', '--no-stream', action='store_true',
+                      help='Return the whole response without streaming')
+    global_group.add_argument('--temperature', type=float, default=0.7,
+                      help='Set temperature (controls randomness, default: 0.7)')
+    global_group.add_argument('--top_p', type=float, default=1.0,
+                      help='Set top_p (controls diversity, default: 1.0)')
+    global_group.add_argument('--max_tokens', type=int,
+                      help='Set max response length in tokens')
+    global_group.add_argument('--log', metavar='FILE', nargs='?', const=True,
+                      help='Set filepath to log conversation to, or create a temporary log file if no path provided')
+    global_group.add_argument('--preprompt',
+                      help='Set custom system prompt to control AI behavior')
+    global_group.add_argument('--prettify', action='store_const', const='auto',
+                      help='Render markdown responses and code with syntax highlighting and formatting')
+    global_group.add_argument('--stream-prettify', action='store_true',
+                      help='Enable streaming with markdown rendering (automatically uses Rich renderer)')
+    global_group.add_argument('--renderer', choices=['auto', 'rich', 'glow'], default='auto',
+                      help='Select which markdown renderer to use with --prettify (auto, rich, or glow)')
+
+    # Mode flags (mutually exclusive)
+    mode_group = parser.add_argument_group('Modes (mutually exclusive)')
+    mode_exclusive_group = mode_group.add_mutually_exclusive_group()
+    mode_exclusive_group.add_argument('-i', '--interactive', action='store_true', help='Start an interactive chat session')
+    mode_exclusive_group.add_argument('-s', '--shell', action='store_true', help='Generate and execute shell commands')
+    mode_exclusive_group.add_argument('-c', '--code', action='store_true', help='Generate code')
+    mode_exclusive_group.add_argument('-t', '--text', action='store_true', help='Enter multi-line text input (submit with Ctrl+D)')
+    # Note: --show-config is handled separately and implicitly acts as a mode
+
+    # Language option for code mode
+    parser.add_argument('--language', default="python", help='Programming language to generate code in (for code mode)')
+
+    # Prompt argument
+    parser.add_argument('prompt', nargs='?', default=None, help='The prompt to send')
+
+    # Add CLI configuration command
+    config_group.add_argument('--cli-config', nargs='*', metavar='COMMAND',
+                      help='Manage CLI configuration (set, get, unset, list)')
+
+    return parser
+
+def parse_args():
+    """Parse command line arguments using the configured parser."""
+    parser = setup_argument_parser()
+    return parser.parse_args()
+
+def validate_args(args):
+    """Validate parsed arguments for correctness and compatibility."""
+    # Validate --all usage
+    if args.all and not args.show_config:
+        raise ValueError("--all can only be used with --show-config")
+
+    # Check if --prettify is used with --stream-prettify (conflict)
+    if args.prettify and args.stream_prettify:
+        raise ValueError("--prettify and --stream-prettify cannot be used together. Choose one option.")
+
+    # Check if --stream-prettify is used but Rich is not available
+    if args.stream_prettify and not has_markdown_renderer('rich'):
+        raise ValueError("--stream-prettify requires Rich to be installed. Install with: pip install \"ngpt[full]\" or pip install rich")
+
+    return args
+
+def validate_markdown_renderer(args):
+    """Validate that required markdown renderers are available.
+
+    Args:
+        args: The parsed command line arguments.
+
+    Returns:
+        tuple: (has_renderer, args)
+            - has_renderer: Boolean indicating if a renderer is available
+            - args: Potentially modified args with prettify disabled if no renderer is available
+    """
+    has_renderer = True
+    if args.prettify:
+        has_renderer = warn_if_no_markdown_renderer(args.renderer)
+        if not has_renderer:
+            # Set a flag to disable prettify since we already warned the user
+            print(f"{COLORS['yellow']}Continuing without markdown rendering.{COLORS['reset']}")
+            args.prettify = False
+
+    return has_renderer, args
+
+def handle_cli_config_args(args):
+    """Process CLI configuration arguments and determine command parameters.
+
+    Args:
+        args: The parsed command line arguments.
+
+    Returns:
+        tuple: (should_handle, action, option, value)
+            - should_handle: True if --cli-config was specified and should be handled
+            - action: The action to perform (set, get, unset, list, help)
+            - option: The option name (or None)
+            - value: The option value (or None)
+    """
+    if args.cli_config is None:
+        return (False, None, None, None)
+
+    # Show help if no arguments or "help" argument
+    if len(args.cli_config) == 0 or (len(args.cli_config) > 0 and args.cli_config[0].lower() == "help"):
+        return (True, "help", None, None)
+
+    action = args.cli_config[0].lower()
+    option = args.cli_config[1] if len(args.cli_config) > 1 else None
+    value = args.cli_config[2] if len(args.cli_config) > 2 else None
+
+    if action in ("set", "get", "unset", "list", "help"):
+        return (True, action, option, value)
+    else:
+        # Unknown action, show help
+        return (True, "help", None, None)
ngpt/cli/interactive.py
CHANGED
@@ -1,9 +1,11 @@
 import sys
+import os
 import shutil
 import datetime
 import traceback
 from .formatters import COLORS
 from .renderers import prettify_markdown, prettify_streaming_markdown
+from ..log import create_logger
 
 # Optional imports for enhanced UI
 try:
@@ -16,7 +18,7 @@ try:
 except ImportError:
     HAS_PROMPT_TOOLKIT = False
 
-def interactive_chat_session(client, web_search=False, no_stream=False, temperature=0.7, top_p=1.0, max_tokens=None,
+def interactive_chat_session(client, web_search=False, no_stream=False, temperature=0.7, top_p=1.0, max_tokens=None, preprompt=None, prettify=False, renderer='auto', stream_prettify=False, logger=None):
     """Start an interactive chat session with the AI.
 
     Args:
@@ -26,11 +28,11 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
         temperature: Controls randomness in the response
         top_p: Controls diversity via nucleus sampling
         max_tokens: Maximum number of tokens to generate in each response
-        log_file: Optional filepath to log conversation to
         preprompt: Custom system prompt to control AI behavior
         prettify: Whether to enable markdown rendering
         renderer: Which markdown renderer to use
         stream_prettify: Whether to enable streaming with prettify
+        logger: Logger instance for logging the conversation
     """
     # Get terminal width for better formatting
     try:
@@ -58,18 +60,9 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
 
     print(f"\n{separator}\n")
 
-    #
-
-
-    try:
-        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-        log_handle = open(log_file, 'a', encoding='utf-8')
-        log_handle.write(f"\n--- nGPT Session Log: {sys.argv} ---\n")
-        log_handle.write(f"Started at: {timestamp}\n\n")
-        print(f"{COLORS['green']}Logging conversation to: {log_file}{COLORS['reset']}")
-    except Exception as e:
-        print(f"{COLORS['yellow']}Warning: Could not open log file: {str(e)}{COLORS['reset']}")
-        log_handle = None
+    # Show logging info if logger is available
+    if logger:
+        print(f"{COLORS['green']}Logging conversation to: {logger.get_log_path()}{COLORS['reset']}")
 
     # Custom separator - use the same length for consistency
     def print_separator():
@@ -90,9 +83,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
         conversation.append(system_message)
 
         # Log system prompt if logging is enabled
-        if
-
-            log_handle.flush()
+        if logger and preprompt:
+            logger.log("system", system_prompt)
 
     # Initialize prompt_toolkit history
     prompt_history = InMemoryHistory() if HAS_PROMPT_TOOLKIT else None
@@ -187,9 +179,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
             conversation.append(user_message)
 
             # Log user message if logging is enabled
-            if
-
-                log_handle.flush()
+            if logger:
+                logger.log("user", user_input)
 
             # Print assistant indicator with formatting
             if not no_stream and not stream_prettify:
@@ -254,22 +245,16 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
             else:
                 print(response)
 
-            # Log
-            if
-
-                log_handle.flush()
+            # Log AI response if logging is enabled
+            if logger:
+                logger.log("assistant", response)
 
             # Print separator between exchanges
             print_separator()
 
     except KeyboardInterrupt:
-        print(f"\n\n{COLORS['
+        print(f"\n\n{COLORS['yellow']}Chat session interrupted by user.{COLORS['reset']}")
     except Exception as e:
-        print(f"\n{COLORS['yellow']}Error
-
-
-    finally:
-        # Close log file if it was opened
-        if log_handle:
-            log_handle.write(f"\n--- End of Session ---\n")
-            log_handle.close()
+        print(f"\n{COLORS['yellow']}Error in chat session: {str(e)}{COLORS['reset']}")
+        if os.environ.get("NGPT_DEBUG"):
+            traceback.print_exc()
ngpt/cli/main.py
CHANGED
@@ -12,6 +12,7 @@ from ..cli_config import (
     CLI_CONFIG_OPTIONS,
     load_cli_config
 )
+from ..log import create_logger
 from .. import __version__
 
 from .formatters import COLORS, ColoredHelpFormatter
@@ -22,6 +23,7 @@ from .modes.chat import chat_mode
 from .modes.code import code_mode
 from .modes.shell import shell_mode
 from .modes.text import text_mode
+from .args import parse_args, validate_args, handle_cli_config_args, setup_argument_parser, validate_markdown_renderer
 
 def show_cli_config_help():
     """Display help information about CLI configuration."""
@@ -171,109 +173,20 @@ def handle_cli_config(action, option=None, value=None):
     show_cli_config_help()
 
 def main():
-    #
-
+    # Parse command line arguments using args.py
+    args = parse_args()
 
-
-
-
-
-
-    # Add custom error method with color
-    original_error = parser.error
-    def error_with_color(message):
-        parser.print_usage(sys.stderr)
-        parser.exit(2, f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}{message}\n")
-    parser.error = error_with_color
-
-    # Custom version action with color
-    class ColoredVersionAction(argparse.Action):
-        def __call__(self, parser, namespace, values, option_string=None):
-            print(f"{COLORS['green']}{COLORS['bold']}nGPT{COLORS['reset']} version {COLORS['yellow']}{__version__}{COLORS['reset']}")
-            parser.exit()
-
-    # Version flag
-    parser.add_argument('-v', '--version', action=ColoredVersionAction, nargs=0, help='Show version information and exit')
-
-    # Config options
-    config_group = parser.add_argument_group('Configuration Options')
-    config_group.add_argument('--config', nargs='?', const=True, help='Path to a custom config file or, if no value provided, enter interactive configuration mode to create a new config')
-    config_group.add_argument('--config-index', type=int, default=0, help='Index of the configuration to use or edit (default: 0)')
-    config_group.add_argument('--provider', help='Provider name to identify the configuration to use')
-    config_group.add_argument('--remove', action='store_true', help='Remove the configuration at the specified index (requires --config and --config-index)')
-    config_group.add_argument('--show-config', action='store_true', help='Show the current configuration(s) and exit')
-    config_group.add_argument('--all', action='store_true', help='Show details for all configurations (requires --show-config)')
-    config_group.add_argument('--list-models', action='store_true', help='List all available models for the current configuration and exit')
-    config_group.add_argument('--list-renderers', action='store_true', help='Show available markdown renderers for use with --prettify')
-
-    # Global options
-    global_group = parser.add_argument_group('Global Options')
-    global_group.add_argument('--api-key', help='API key for the service')
-    global_group.add_argument('--base-url', help='Base URL for the API')
-    global_group.add_argument('--model', help='Model to use')
-    global_group.add_argument('--web-search', action='store_true',
-                      help='Enable web search capability (Note: Your API endpoint must support this feature)')
-    global_group.add_argument('-n', '--no-stream', action='store_true',
-                      help='Return the whole response without streaming')
-    global_group.add_argument('--temperature', type=float, default=0.7,
-                      help='Set temperature (controls randomness, default: 0.7)')
-    global_group.add_argument('--top_p', type=float, default=1.0,
-                      help='Set top_p (controls diversity, default: 1.0)')
-    global_group.add_argument('--max_tokens', type=int,
-                      help='Set max response length in tokens')
-    global_group.add_argument('--log', metavar='FILE',
-                      help='Set filepath to log conversation to (For interactive modes)')
-    global_group.add_argument('--preprompt',
-                      help='Set custom system prompt to control AI behavior')
-    global_group.add_argument('--prettify', action='store_const', const='auto',
-                      help='Render markdown responses and code with syntax highlighting and formatting')
-    global_group.add_argument('--stream-prettify', action='store_true',
-                      help='Enable streaming with markdown rendering (automatically uses Rich renderer)')
-    global_group.add_argument('--renderer', choices=['auto', 'rich', 'glow'], default='auto',
-                      help='Select which markdown renderer to use with --prettify (auto, rich, or glow)')
-
-    # Mode flags (mutually exclusive)
-    mode_group = parser.add_argument_group('Modes (mutually exclusive)')
-    mode_exclusive_group = mode_group.add_mutually_exclusive_group()
-    mode_exclusive_group.add_argument('-i', '--interactive', action='store_true', help='Start an interactive chat session')
-    mode_exclusive_group.add_argument('-s', '--shell', action='store_true', help='Generate and execute shell commands')
-    mode_exclusive_group.add_argument('-c', '--code', action='store_true', help='Generate code')
-    mode_exclusive_group.add_argument('-t', '--text', action='store_true', help='Enter multi-line text input (submit with Ctrl+D)')
-    # Note: --show-config is handled separately and implicitly acts as a mode
-
-    # Language option for code mode
-    parser.add_argument('--language', default="python", help='Programming language to generate code in (for code mode)')
-
-    # Prompt argument
-    parser.add_argument('prompt', nargs='?', default=None, help='The prompt to send')
-
-    # Add CLI configuration command
-    config_group.add_argument('--cli-config', nargs='*', metavar='COMMAND',
-                      help='Manage CLI configuration (set, get, unset, list)')
-
-    args = parser.parse_args()
+    try:
+        args = validate_args(args)
+    except ValueError as e:
+        print(f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}{str(e)}\n")
+        sys.exit(2)
 
     # Handle CLI configuration command
-
-
-
-
-            return
-
-        action = args.cli_config[0].lower()
-        option = args.cli_config[1] if len(args.cli_config) > 1 else None
-        value = args.cli_config[2] if len(args.cli_config) > 2 else None
-
-        if action in ("set", "get", "unset", "list", "help"):
-            handle_cli_config(action, option, value)
-            return
-        else:
-            show_cli_config_help()
-            return
-
-    # Validate --all usage
-    if args.all and not args.show_config:
-        parser.error("--all can only be used with --show-config")
+    should_handle_cli_config, action, option, value = handle_cli_config_args(args)
+    if should_handle_cli_config:
+        handle_cli_config(action, option, value)
+        return
 
     # Handle --renderers flag to show available markdown renderers
     if args.list_renderers:
@@ -283,6 +196,46 @@ def main():
     # Load CLI configuration early
     cli_config = load_cli_config()
 
+    # Initialize logger if --log is specified
+    logger = None
+    if args.log is not None:
+        # Check if the log value is a string that looks like a prompt (incorrectly parsed)
+        likely_prompt = False
+        likely_path = False
+
+        if isinstance(args.log, str) and args.prompt is None:
+            # Check if string looks like a path
+            if args.log.startswith('/') or args.log.startswith('./') or args.log.startswith('../') or args.log.startswith('~'):
+                likely_path = True
+            # Check if string has a file extension
+            elif '.' in os.path.basename(args.log):
+                likely_path = True
+            # Check if parent directory exists
+            elif os.path.exists(os.path.dirname(args.log)) and os.path.dirname(args.log) != '':
+                likely_path = True
+            # Check if string ends with a question mark (very likely a prompt)
+            elif args.log.strip().endswith('?'):
+                likely_prompt = True
+            # As a last resort, if it has spaces and doesn't look like a path, assume it's a prompt
+            elif ' ' in args.log and not likely_path:
+                likely_prompt = True
+
+        if likely_prompt and not likely_path:
+            # This is likely a prompt, not a log path
+            args.prompt = args.log
+            # Change log to True to create a temp file
+            args.log = True
+
+        # If --log is True, it means it was used without a path value
+        log_path = None if args.log is True else args.log
+        logger = create_logger(log_path)
+        if logger:
+            logger.open()
+            print(f"{COLORS['green']}Logging session to: {logger.get_log_path()}{COLORS['reset']}")
+            # If it's a temporary log file, inform the user
+            if logger.is_temporary():
+                print(f"{COLORS['green']}Created temporary log file.{COLORS['reset']}")
+
     # Priority order for config selection:
     # 1. Command-line arguments (args.provider, args.config_index)
     # 2. CLI configuration (cli_config["provider"], cli_config["config-index"])
@@ -293,15 +246,23 @@
     effective_config_index = args.config_index
 
     # Only apply CLI config for provider/config-index if not explicitly set on command line
-
+    # If --config-index is explicitly provided, we should ignore provider from CLI config
+    config_index_from_cli = '--config-index' in sys.argv
+    provider_from_cli = '--provider' in sys.argv
+
+    if not provider_from_cli and 'provider' in cli_config and not config_index_from_cli:
         effective_provider = cli_config['provider']
 
-    if
+    if not config_index_from_cli and 'config-index' in cli_config and not provider_from_cli:
         effective_config_index = cli_config['config-index']
 
     # Check for mutual exclusivity between provider and config-index
     if effective_config_index != 0 and effective_provider:
-
+        from_cli_config = not provider_from_cli and 'provider' in cli_config
+        provider_source = "CLI config file (ngpt-cli.conf)" if from_cli_config else "command-line arguments"
+        error_msg = f"--config-index and --provider cannot be used together. Provider from {provider_source}."
+        print(f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}{error_msg}\n")
+        sys.exit(2)
 
     # Handle interactive configuration mode
     if args.config is True: # --config was used without a value
@@ -311,7 +272,8 @@
     if args.remove:
         # Validate that config_index is explicitly provided
         if '--config-index' not in sys.argv and not effective_provider:
-
+            print(f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}--remove requires explicitly specifying --config-index or --provider\n")
+            sys.exit(2)
 
         # Show config details before asking for confirmation
         configs = load_configs(str(config_path))
@@ -489,6 +451,8 @@
 
     # For interactive mode, we'll allow continuing without a specific prompt
     if not args.prompt and not (args.shell or args.code or args.text or args.interactive or args.show_config or args.list_models):
+        # Simply use the parser's help
+        parser = setup_argument_parser()
         parser.print_help()
         return
 
@@ -498,25 +462,17 @@
 
     # Check if --prettify is used but no markdown renderer is available
     # This will warn the user immediately if they request prettify but don't have the tools
-    has_renderer =
-    if
-
-
-        # Set a flag to disable prettify since we already warned the user
-        print(f"{COLORS['yellow']}Continuing without markdown rendering.{COLORS['reset']}")
-        show_available_renderers()
-        args.prettify = False
-
-    # Check if --prettify is used with --stream-prettify (conflict)
-    if args.prettify and args.stream_prettify:
-        parser.error("--prettify and --stream-prettify cannot be used together. Choose one option.")
-
-    # Check if --stream-prettify is used but Rich is not available
-    if args.stream_prettify and not has_markdown_renderer('rich'):
-        parser.error("--stream-prettify requires Rich to be installed. Install with: pip install \"ngpt[full]\" or pip install rich")
-
+    has_renderer, args = validate_markdown_renderer(args)
+    if not has_renderer:
+        show_available_renderers()
+
     # Initialize client using the potentially overridden active_config
-    client = NGPTClient(
+    client = NGPTClient(
+        api_key=active_config.get("api_key", args.api_key),
+        base_url=active_config.get("base_url", args.base_url),
+        provider=active_config.get("provider"),
+        model=active_config.get("model", args.model)
+    )
 
     try:
         # Handle listing models
@@ -549,32 +505,32 @@
                 temperature=args.temperature,
                 top_p=args.top_p,
                 max_tokens=args.max_tokens,
-                log_file=args.log,
                 preprompt=args.preprompt,
                 prettify=args.prettify,
                 renderer=args.renderer,
-                stream_prettify=args.stream_prettify
+                stream_prettify=args.stream_prettify,
+                logger=logger
             )
         elif args.shell:
            # Apply CLI config for shell mode
            args = apply_cli_config(args, "shell")
 
            # Shell command generation mode
-            shell_mode(client, args)
+            shell_mode(client, args, logger=logger)
 
        elif args.code:
            # Apply CLI config for code mode
            args = apply_cli_config(args, "code")
 
            # Code generation mode
-            code_mode(client, args)
+            code_mode(client, args, logger=logger)
 
        elif args.text:
            # Apply CLI config for text mode
            args = apply_cli_config(args, "text")
 
            # Text mode (multiline input)
-            text_mode(client, args)
+            text_mode(client, args, logger=logger)
 
        else:
            # Default to chat mode
@@ -582,12 +538,15 @@
             args = apply_cli_config(args, "all")
 
             # Standard chat mode
-            chat_mode(client, args)
-
+            chat_mode(client, args, logger=logger)
     except KeyboardInterrupt:
         print("\nOperation cancelled by user. Exiting gracefully.")
         # Make sure we exit with a non-zero status code to indicate the operation was cancelled
         sys.exit(130)  # 130 is the standard exit code for SIGINT (Ctrl+C)
     except Exception as e:
         print(f"Error: {e}")
-        sys.exit(1)  # Exit with error code
+        sys.exit(1)  # Exit with error code
+    finally:
+        # Close the logger if it exists
+        if logger:
+            logger.close()
ngpt/cli/modes/chat.py
CHANGED
@@ -1,13 +1,15 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown
+from ...log import create_logger
 import sys
 
-def chat_mode(client, args):
+def chat_mode(client, args, logger=None):
     """Handle the standard chat mode with a single prompt.
 
     Args:
         client: The NGPTClient instance
         args: The parsed command-line arguments
+        logger: Optional logger instance
     """
     # Get the prompt
     if args.prompt is None:
@@ -19,10 +21,18 @@ def chat_mode(client, args):
             sys.exit(130)
     else:
         prompt = args.prompt
+
+    # Log the user message if logging is enabled
+    if logger:
+        logger.log("user", prompt)
 
     # Create messages array with preprompt if available
     messages = None
     if args.preprompt:
+        # Log the system message if logging is enabled
+        if logger:
+            logger.log("system", args.preprompt)
+
         messages = [
             {"role": "system", "content": args.preprompt},
             {"role": "user", "content": prompt}
@@ -63,6 +73,10 @@ def chat_mode(client, args):
     # Stop live display if using stream-prettify
     if args.stream_prettify and live_display:
         live_display.stop()
+
+    # Log the AI response if logging is enabled
+    if logger and response:
+        logger.log("assistant", response)
 
     # Handle non-stream response or regular prettify
     if (args.no_stream or args.prettify) and response:
ngpt/cli/modes/code.py
CHANGED
@@ -1,13 +1,15 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown, has_markdown_renderer, show_available_renderers
+from ...log import create_logger
 import sys
 
-def code_mode(client, args):
+def code_mode(client, args, logger=None):
     """Handle the code generation mode.
 
     Args:
         client: The NGPTClient instance
         args: The parsed command-line arguments
+        logger: Optional logger instance
     """
     if args.prompt is None:
         try:
@@ -18,6 +20,10 @@ def code_mode(client, args):
             sys.exit(130)
     else:
         prompt = args.prompt
+
+    # Log the user prompt if logging is enabled
+    if logger:
+        logger.log("user", prompt)
 
     # Setup for streaming and prettify logic
     stream_callback = None
@@ -86,6 +92,10 @@ def code_mode(client, args):
     # Stop live display if using stream-prettify
     if use_stream_prettify and live_display:
         live_display.stop()
+
+    # Log the generated code if logging is enabled
+    if logger and generated_code:
+        logger.log("assistant", generated_code)
 
     # Print non-streamed output if needed
     if generated_code and not should_stream:
ngpt/cli/modes/shell.py
CHANGED
@@ -1,13 +1,15 @@
 from ..formatters import COLORS
+from ...log import create_logger
 import subprocess
 import sys
 
-def shell_mode(client, args):
+def shell_mode(client, args, logger=None):
     """Handle the shell command generation mode.
 
     Args:
         client: The NGPTClient instance
         args: The parsed command-line arguments
+        logger: Optional logger instance
     """
     if args.prompt is None:
         try:
@@ -18,12 +20,20 @@ def shell_mode(client, args):
             sys.exit(130)
     else:
         prompt = args.prompt
+
+    # Log the user prompt if logging is enabled
+    if logger:
+        logger.log("user", prompt)
 
     command = client.generate_shell_command(prompt, web_search=args.web_search,
                                            temperature=args.temperature, top_p=args.top_p,
                                            max_tokens=args.max_tokens)
     if not command:
         return  # Error already printed by client
+
+    # Log the generated command if logging is enabled
+    if logger:
+        logger.log("assistant", command)
 
     print(f"\nGenerated command: {command}")
 
@@ -35,12 +45,32 @@ def shell_mode(client, args):
         return
 
     if response == 'y' or response == 'yes':
+        # Log the execution if logging is enabled
+        if logger:
+            logger.log("system", f"Executing command: {command}")
+
         try:
             try:
                 print("\nExecuting command... (Press Ctrl+C to cancel)")
                 result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
-
+                output = result.stdout
+
+                # Log the command output if logging is enabled
+                if logger:
+                    logger.log("system", f"Command output: {output}")
+
+                print(f"\nOutput:\n{output}")
            except KeyboardInterrupt:
                print("\nCommand execution cancelled by user.")
+
+                # Log the cancellation if logging is enabled
+                if logger:
+                    logger.log("system", "Command execution cancelled by user")
            except subprocess.CalledProcessError as e:
-
+                error = e.stderr
+
+                # Log the error if logging is enabled
+                if logger:
+                    logger.log("system", f"Command error: {error}")
+
+                print(f"\nError:\n{error}")
ngpt/cli/modes/text.py
CHANGED
@@ -1,13 +1,15 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown
 from ..ui import get_multiline_input
+from ...log import create_logger
 
-def text_mode(client, args):
+def text_mode(client, args, logger=None):
     """Handle the multi-line text input mode.
 
     Args:
         client: The NGPTClient instance
         args: The parsed command-line arguments
+        logger: Optional logger instance
     """
     if args.prompt is not None:
         prompt = args.prompt
@@ -20,9 +22,17 @@ def text_mode(client, args):
 
     print("\nSubmission successful. Waiting for response...")
 
+    # Log the user message if logging is enabled
+    if logger:
+        logger.log("user", prompt)
+
     # Create messages array with preprompt if available
     messages = None
     if args.preprompt:
+        # Log the system message if logging is enabled
+        if logger:
+            logger.log("system", args.preprompt)
+
         messages = [
             {"role": "system", "content": args.preprompt},
             {"role": "user", "content": prompt}
@@ -64,6 +74,10 @@ def text_mode(client, args):
     if args.stream_prettify and live_display:
         live_display.stop()
 
+    # Log the AI response if logging is enabled
+    if logger and response:
+        logger.log("assistant", response)
+
     # Handle non-stream response or regular prettify
     if (args.no_stream or args.prettify) and response:
         if args.prettify:
ngpt/cli_config.py
CHANGED
@@ -11,7 +11,7 @@ CLI_CONFIG_OPTIONS = {
     "temperature": {"type": "float", "default": 0.7, "context": ["all"]},
     "top_p": {"type": "float", "default": 1.0, "context": ["all"]},
     "max_tokens": {"type": "int", "default": None, "context": ["all"]},
-    "log": {"type": "str", "default": None, "context": ["
+    "log": {"type": "str", "default": None, "context": ["all"]},
     "preprompt": {"type": "str", "default": None, "context": ["all"]},
     "no-stream": {"type": "bool", "default": False, "context": ["all"], "exclusive": ["prettify", "stream-prettify"]},
     "prettify": {"type": "bool", "default": False, "context": ["all"], "exclusive": ["no-stream", "stream-prettify"]},
ngpt/log.py
ADDED
@@ -0,0 +1,180 @@
+import os
+import sys
+import datetime
+import tempfile
+from pathlib import Path
+from typing import Optional, TextIO, Dict, Any
+
+# Simple color definitions for fallback message
+COLORS = {
+    "green": "\033[32m",
+    "yellow": "\033[33m",
+    "reset": "\033[0m"
+}
+
+class Logger:
+    """Handles logging functionality for ngpt"""
+
+    def __init__(self, log_path: Optional[str] = None):
+        """
+        Initialize the logger.
+
+        Args:
+            log_path: Optional path to the log file. If None, a temporary file will be created.
+        """
+        self.log_path = log_path
+        self.log_file: Optional[TextIO] = None
+        self.is_temp = False
+        self.command_args = sys.argv
+
+        if self.log_path is None:
+            # Create a temporary log file with date-time in the name
+            timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
+
+            # Use OS-specific temp directory
+            if sys.platform == "win32":
+                # Windows
+                temp_dir = os.environ.get("TEMP", "")
+                self.log_path = os.path.join(temp_dir, f"ngpt-{timestamp}.log")
+            else:
+                # Linux/MacOS
+                self.log_path = f"/tmp/ngpt-{timestamp}.log"
+
+            self.is_temp = True
+
+    def __enter__(self):
+        """Context manager entry"""
+        self.open()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Context manager exit"""
+        self.close()
+
+    def open(self) -> bool:
+        """
+        Open the log file for writing.
+
+        Returns:
+            bool: True if successful, False otherwise.
+        """
+        try:
+            # Expand ~ to home directory if present
+            if self.log_path.startswith('~'):
+                self.log_path = os.path.expanduser(self.log_path)
+
+            # Make sure the directory exists
+            log_dir = os.path.dirname(self.log_path)
+            if log_dir and not os.path.exists(log_dir):
+                try:
+                    os.makedirs(log_dir, exist_ok=True)
+                except (PermissionError, OSError) as e:
+                    print(f"Warning: Could not create log directory: {str(e)}", file=sys.stderr)
+                    # Fall back to temp directory
+                    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
+                    if sys.platform == "win32":
+                        temp_dir = os.environ.get("TEMP", "")
+                        self.log_path = os.path.join(temp_dir, f"ngpt-{timestamp}.log")
+                    else:
+                        self.log_path = f"/tmp/ngpt-{timestamp}.log"
+                    self.is_temp = True
+
+            self.log_file = open(self.log_path, 'a', encoding='utf-8')
+
+            # Write header information
+            timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+            self.log_file.write(f"\n--- nGPT Session Log ---\n")
+            self.log_file.write(f"Started at: {timestamp}\n")
+            self.log_file.write(f"Command: {' '.join(self.command_args)}\n")
+            self.log_file.write(f"Log file: {self.log_path}\n\n")
+            self.log_file.flush()
+
+            return True
+        except Exception as e:
+            print(f"Warning: Could not open log file: {str(e)}", file=sys.stderr)
+
+            # Fall back to temp file
+            timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
+            if sys.platform == "win32":
+                temp_dir = os.environ.get("TEMP", "")
+                self.log_path = os.path.join(temp_dir, f"ngpt-{timestamp}.log")
+            else:
+                self.log_path = f"/tmp/ngpt-{timestamp}.log"
+            self.is_temp = True
+
+            # Try again with temp file
+            try:
+                self.log_file = open(self.log_path, 'a', encoding='utf-8')
+                timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                self.log_file.write(f"\n--- nGPT Session Log ---\n")
+                self.log_file.write(f"Started at: {timestamp}\n")
+                self.log_file.write(f"Command: {' '.join(self.command_args)}\n")
+                self.log_file.write(f"Log file: {self.log_path}\n\n")
+                self.log_file.flush()
+                print(f"{COLORS['green']}Falling back to temporary log file: {self.log_path}{COLORS['reset']}", file=sys.stderr)
+                return True
+            except Exception as e2:
+                print(f"Warning: Could not open temporary log file: {str(e2)}", file=sys.stderr)
+                self.log_file = None
+                return False
+
+    def close(self):
+        """Close the log file if it's open."""
+        if self.log_file:
+            try:
+                timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                self.log_file.write(f"\n--- Session ended at {timestamp} ---\n")
+                self.log_file.close()
+            except Exception:
+                pass
+            self.log_file = None
+
+    def log(self, role: str, content: str):
+        """
+        Log a message.
+
+        Args:
+            role: Role of the message (e.g., 'system', 'user', 'assistant')
+            content: Content of the message
+        """
+        if not self.log_file:
+            return
+
+        try:
+            timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+            self.log_file.write(f"{timestamp}: {role}: {content}\n")
+            self.log_file.flush()
+        except Exception:
+            # Silently fail if logging fails
+            pass
+
+    def get_log_path(self) -> str:
+        """
+        Get the path to the log file.
+
+        Returns:
+            str: Path to the log file
+        """
+        return self.log_path
+
+    def is_temporary(self) -> bool:
+        """
+        Check if the log file is temporary.
+
+        Returns:
+            bool: True if the log file is temporary
+        """
+        return self.is_temp
+
+
+def create_logger(log_path: Optional[str] = None) -> Logger:
+    """
+    Create a logger instance.
+
+    Args:
+        log_path: Optional path to the log file
+
+    Returns:
+        Logger: Logger instance
+    """
+    return Logger(log_path)
{ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 2.9.2
+Version: 2.11.0
 Summary: A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt
@@ -114,6 +114,9 @@ ngpt --preprompt "You are a Linux expert" "How do I find large files?"
 
 # Log your conversation to a file
 ngpt --interactive --log conversation.log
+
+# Create a temporary log file automatically
+ngpt --log "Tell me about quantum computing"
 ```
 
 For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
@@ -135,6 +138,7 @@ For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdri
 - 🎭 **System Prompts**: Customize model behavior with custom system prompts
 - 📃 **Conversation Logging**: Save your conversations to text files for later reference
 - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
+- 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
 
 See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
 
@@ -292,16 +296,14 @@ nGPT can also be used as a framework to build your own AI-powered command-line t
 
 ```python
 from ngpt import NGPTClient, load_config
-from ngpt.cli.
+from ngpt.cli.interactive import interactive_chat_session
 from ngpt.cli.renderers import prettify_markdown
-from ngpt.cli.
-import
+from ngpt.cli.args import setup_argument_parser
+import sys
 
 # Create a custom CLI tool with colorized help
-parser =
-
-    formatter_class=ColoredHelpFormatter
-)
+parser = setup_argument_parser()
+parser.description = "Specialized Code Assistant"
 parser.add_argument("prompt", nargs="?", help="Code description")
 parser.add_argument("--language", "-l", default="python", help="Programming language")
 parser.add_argument("--interactive", "-i", action="store_true", help="Start interactive mode")
@@ -318,6 +320,9 @@ elif args.prompt:
     # Generate and prettify code
     code = client.generate_code(args.prompt, language=args.language)
     print(prettify_markdown(f"```{args.language}\n{code}\n```"))
+else:
+    parser.print_help()
+    sys.exit(1)
 ```
 
 This allows you to build specialized AI tools like:
@@ -347,7 +352,7 @@ You can configure the client using the following options:
 | `--top_p` | Set top_p (controls diversity, default: 1.0) |
 | `--max_tokens` | Set maximum response length in tokens |
 | `--preprompt` | Set custom system prompt to control AI behavior |
-| `--log` |
+| `--log` | Enable logging: use `--log` to create a temporary log file, or `--log PATH` for a specific location |
 | `--prettify` | Render markdown responses and code with syntax highlighting |
 | `--stream-prettify` | Enable real-time markdown rendering with syntax highlighting while streaming |
 | `--renderer` | Select which markdown renderer to use with --prettify (auto, rich, or glow) |
ngpt-2.11.0.dist-info/RECORD
ADDED
@@ -0,0 +1,25 @@
+ngpt/__init__.py,sha256=awvycdj3tgcOr0BO81L4XU6DOtnToxFqkPHe1Pyu0Bw,652
+ngpt/cli.py,sha256=j3eFYPOtCCFBOGh7NK5IWEnADnTMMSEB9GLyIDoW724,66
+ngpt/cli_config.py,sha256=fUtahEUJlFt1cguIXrfHk0exn6O1qnm50uTKAgvtySc,9984
+ngpt/client.py,sha256=Rv-JO8RAmw1v3gdLkwaPe_PEw6p83cejO0YNT_DDjeg,15134
+ngpt/config.py,sha256=WYOk_b1eiYjo6hpV3pfXr2RjqhOnmKqwZwKid1T41I4,10363
+ngpt/log.py,sha256=Bxv2-GbWtVYa3u94Zs_OVEvYk_CbuT5hrDH06KHLXa8,6369
+ngpt/cli/__init__.py,sha256=hebbDSMGiOd43YNnQP67uzr67Ue6rZPwm2czynr5iZY,43
+ngpt/cli/args.py,sha256=2e13wYQGfHFXpxZz5wXuvmoYpIkK6PEZQamHxfHAdyY,8868
+ngpt/cli/config_manager.py,sha256=L091h99ntMBth_FM39npGCOtDCV5kVkukNSkCIj6dpI,3752
+ngpt/cli/formatters.py,sha256=1ofNEWEZtFr0MJ3oWomoL_mFmZHlUdT3I5qGtbDQ4g0,9378
+ngpt/cli/interactive.py,sha256=8TBDbAqgenu8JiIJzVEGN67bcDA71WKJBXmBhdcJl-E,10953
+ngpt/cli/main.py,sha256=N2vCeVKDGJFIkVVPxGYgE2wCq6n0vAhdIE3dil7Mqv4,27043
+ngpt/cli/renderers.py,sha256=U3Vef3nY1NF2JKtLUtUjdFomyqIrijGWdxRPm46urr4,10546
+ngpt/cli/ui.py,sha256=2JXkCRw5utaKpNZIy0u8F_Jh2zrWbm93dMz91wf9CkQ,5334
+ngpt/cli/modes/__init__.py,sha256=11znFpqzHyRsEtaTrms5M3q2SrscT9VvUgr7C2B1o-E,179
+ngpt/cli/modes/chat.py,sha256=dUyPG4wzUZYbCzfjXHpJvIocqbHMKSJuOMBNaJTAdsY,3171
+ngpt/cli/modes/code.py,sha256=o-dwPwom7cS6wwS1jlEzDoYwhwgh_ClwJYCFGvZw9es,4326
+ngpt/cli/modes/shell.py,sha256=zjfajH4BH2y18AzBlwvcryeke6hiS43rqPidNfJLKwQ,2715
+ngpt/cli/modes/text.py,sha256=QQr9dENJP35Xe8Ck0nbmD8gE7uVng4KEHcZkSHcQXCg,3173
+ngpt/utils/__init__.py,sha256=NK8wlI9-YeaKPOaXBVfUj3mKOXohfD3GmNy5obOIXOM,20
+ngpt-2.11.0.dist-info/METADATA,sha256=o9WAF9lC8DEqzJbP0_u1lz5qZLEPow6mzUSqPHKy6eY,20569
+ngpt-2.11.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ngpt-2.11.0.dist-info/entry_points.txt,sha256=1cnAMujyy34DlOahrJg19lePSnb08bLbkUs_kVerqdk,39
+ngpt-2.11.0.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
+ngpt-2.11.0.dist-info/RECORD,,
ngpt-2.9.2.dist-info/RECORD
DELETED
@@ -1,23 +0,0 @@
-ngpt/__init__.py,sha256=awvycdj3tgcOr0BO81L4XU6DOtnToxFqkPHe1Pyu0Bw,652
-ngpt/cli.py,sha256=j3eFYPOtCCFBOGh7NK5IWEnADnTMMSEB9GLyIDoW724,66
-ngpt/cli_config.py,sha256=Om8dXqdBqPCP5V4THQMkzZgHTQvN2rMAV6QjoVDQcZ4,10000
-ngpt/client.py,sha256=Rv-JO8RAmw1v3gdLkwaPe_PEw6p83cejO0YNT_DDjeg,15134
-ngpt/config.py,sha256=WYOk_b1eiYjo6hpV3pfXr2RjqhOnmKqwZwKid1T41I4,10363
-ngpt/cli/__init__.py,sha256=hebbDSMGiOd43YNnQP67uzr67Ue6rZPwm2czynr5iZY,43
-ngpt/cli/config_manager.py,sha256=L091h99ntMBth_FM39npGCOtDCV5kVkukNSkCIj6dpI,3752
-ngpt/cli/formatters.py,sha256=1ofNEWEZtFr0MJ3oWomoL_mFmZHlUdT3I5qGtbDQ4g0,9378
-ngpt/cli/interactive.py,sha256=J6DFkJVBdJ6NjZllsDgJnY1J5RTiKW341p4Zn4wHpGc,11718
-ngpt/cli/main.py,sha256=6VvBg-PSVY8pm9WiPNXkyPQuzX-dCPNrEX5UPIZWXYk,30711
-ngpt/cli/renderers.py,sha256=U3Vef3nY1NF2JKtLUtUjdFomyqIrijGWdxRPm46urr4,10546
-ngpt/cli/ui.py,sha256=2JXkCRw5utaKpNZIy0u8F_Jh2zrWbm93dMz91wf9CkQ,5334
-ngpt/cli/modes/__init__.py,sha256=11znFpqzHyRsEtaTrms5M3q2SrscT9VvUgr7C2B1o-E,179
-ngpt/cli/modes/chat.py,sha256=ilTEGu3a8FJ_wGC_P5WnDLx0Okzh3QxJ8HoiYj84g0o,2721
-ngpt/cli/modes/code.py,sha256=_Z3cKYdeifYZSXZ4dMnQWcnVpM2TvYQd-7S7Q3blfEw,3998
-ngpt/cli/modes/shell.py,sha256=Fx83_JBc3P5vgCCPlXgXFSgzwTY0UMGfUwY4_CU10Ro,1654
-ngpt/cli/modes/text.py,sha256=YpNpcujPweO_Biwg4aYwGw4_ShefzaNVtf8d_QrcR_Q,2719
-ngpt/utils/__init__.py,sha256=NK8wlI9-YeaKPOaXBVfUj3mKOXohfD3GmNy5obOIXOM,20
-ngpt-2.9.2.dist-info/METADATA,sha256=Re9hYNXfjtWAu4HIl220MSiRa2Ckgq2hTiDr3lPdZis,20343
-ngpt-2.9.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-ngpt-2.9.2.dist-info/entry_points.txt,sha256=1cnAMujyy34DlOahrJg19lePSnb08bLbkUs_kVerqdk,39
-ngpt-2.9.2.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
-ngpt-2.9.2.dist-info/RECORD,,
{ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/WHEEL
File without changes
{ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/entry_points.txt
File without changes
{ngpt-2.9.2.dist-info → ngpt-2.11.0.dist-info}/licenses/LICENSE
File without changes