ngpt 2.9.2 → 2.11.0 (source tarball diff: 2.9.2.tar.gz → 2.11.0.tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {ngpt-2.9.2 → ngpt-2.11.0}/PKG-INFO +14 -9
  2. {ngpt-2.9.2 → ngpt-2.11.0}/README.md +13 -8
  3. {ngpt-2.9.2 → ngpt-2.11.0}/docs/api/cli.md +2 -2
  4. {ngpt-2.9.2 → ngpt-2.11.0}/docs/configuration.md +5 -2
  5. {ngpt-2.9.2 → ngpt-2.11.0}/docs/examples/advanced.md +7 -1
  6. {ngpt-2.9.2 → ngpt-2.11.0}/docs/examples/cli_components.md +4 -6
  7. {ngpt-2.9.2 → ngpt-2.11.0}/docs/overview.md +5 -0
  8. {ngpt-2.9.2 → ngpt-2.11.0}/docs/usage/cli_framework.md +35 -0
  9. {ngpt-2.9.2 → ngpt-2.11.0}/docs/usage/cli_usage.md +26 -3
  10. ngpt-2.11.0/ngpt/cli/args.py +161 -0
  11. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/interactive.py +18 -33
  12. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/main.py +89 -130
  13. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/modes/chat.py +15 -1
  14. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/modes/code.py +11 -1
  15. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/modes/shell.py +33 -3
  16. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/modes/text.py +15 -1
  17. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli_config.py +1 -1
  18. ngpt-2.11.0/ngpt/log.py +180 -0
  19. {ngpt-2.9.2 → ngpt-2.11.0}/pyproject.toml +1 -1
  20. {ngpt-2.9.2 → ngpt-2.11.0}/uv.lock +1 -1
  21. {ngpt-2.9.2 → ngpt-2.11.0}/.github/workflows/python-publish.yml +0 -0
  22. {ngpt-2.9.2 → ngpt-2.11.0}/.gitignore +0 -0
  23. {ngpt-2.9.2 → ngpt-2.11.0}/.python-version +0 -0
  24. {ngpt-2.9.2 → ngpt-2.11.0}/COMMIT_GUIDELINES.md +0 -0
  25. {ngpt-2.9.2 → ngpt-2.11.0}/CONTRIBUTING.md +0 -0
  26. {ngpt-2.9.2 → ngpt-2.11.0}/LICENSE +0 -0
  27. {ngpt-2.9.2 → ngpt-2.11.0}/docs/CONTRIBUTING.md +0 -0
  28. {ngpt-2.9.2 → ngpt-2.11.0}/docs/LICENSE.md +0 -0
  29. {ngpt-2.9.2 → ngpt-2.11.0}/docs/README.md +0 -0
  30. {ngpt-2.9.2 → ngpt-2.11.0}/docs/_config.yml +0 -0
  31. {ngpt-2.9.2 → ngpt-2.11.0}/docs/api/README.md +0 -0
  32. {ngpt-2.9.2 → ngpt-2.11.0}/docs/api/client.md +0 -0
  33. {ngpt-2.9.2 → ngpt-2.11.0}/docs/api/config.md +0 -0
  34. {ngpt-2.9.2 → ngpt-2.11.0}/docs/assets/css/style.scss +0 -0
  35. {ngpt-2.9.2 → ngpt-2.11.0}/docs/examples/README.md +0 -0
  36. {ngpt-2.9.2 → ngpt-2.11.0}/docs/examples/basic.md +0 -0
  37. {ngpt-2.9.2 → ngpt-2.11.0}/docs/examples/integrations.md +0 -0
  38. {ngpt-2.9.2 → ngpt-2.11.0}/docs/installation.md +0 -0
  39. {ngpt-2.9.2 → ngpt-2.11.0}/docs/usage/README.md +0 -0
  40. {ngpt-2.9.2 → ngpt-2.11.0}/docs/usage/cli_config.md +0 -0
  41. {ngpt-2.9.2 → ngpt-2.11.0}/docs/usage/library_usage.md +0 -0
  42. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/__init__.py +0 -0
  43. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/__init__.py +0 -0
  44. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/config_manager.py +0 -0
  45. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/formatters.py +0 -0
  46. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/modes/__init__.py +0 -0
  47. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/renderers.py +0 -0
  48. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli/ui.py +0 -0
  49. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/cli.py +0 -0
  50. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/client.py +0 -0
  51. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/config.py +0 -0
  52. {ngpt-2.9.2 → ngpt-2.11.0}/ngpt/utils/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ngpt
3
- Version: 2.9.2
3
+ Version: 2.11.0
4
4
  Summary: A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints.
5
5
  Project-URL: Homepage, https://github.com/nazdridoy/ngpt
6
6
  Project-URL: Repository, https://github.com/nazdridoy/ngpt
@@ -114,6 +114,9 @@ ngpt --preprompt "You are a Linux expert" "How do I find large files?"
114
114
 
115
115
  # Log your conversation to a file
116
116
  ngpt --interactive --log conversation.log
117
+
118
+ # Create a temporary log file automatically
119
+ ngpt --log "Tell me about quantum computing"
117
120
  ```
118
121
 
119
122
  For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
@@ -135,6 +138,7 @@ For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdri
135
138
  - 🎭 **System Prompts**: Customize model behavior with custom system prompts
136
139
  - 📃 **Conversation Logging**: Save your conversations to text files for later reference
137
140
  - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
141
+ - 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
138
142
 
139
143
  See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
140
144
 
@@ -292,16 +296,14 @@ nGPT can also be used as a framework to build your own AI-powered command-line t
292
296
 
293
297
  ```python
294
298
  from ngpt import NGPTClient, load_config
295
- from ngpt.cli.main import interactive_chat_session
299
+ from ngpt.cli.interactive import interactive_chat_session
296
300
  from ngpt.cli.renderers import prettify_markdown
297
- from ngpt.cli.formatters import ColoredHelpFormatter
298
- import argparse
301
+ from ngpt.cli.args import setup_argument_parser
302
+ import sys
299
303
 
300
304
  # Create a custom CLI tool with colorized help
301
- parser = argparse.ArgumentParser(
302
- description="Specialized Code Assistant",
303
- formatter_class=ColoredHelpFormatter
304
- )
305
+ parser = setup_argument_parser()
306
+ parser.description = "Specialized Code Assistant"
305
307
  parser.add_argument("prompt", nargs="?", help="Code description")
306
308
  parser.add_argument("--language", "-l", default="python", help="Programming language")
307
309
  parser.add_argument("--interactive", "-i", action="store_true", help="Start interactive mode")
@@ -318,6 +320,9 @@ elif args.prompt:
318
320
  # Generate and prettify code
319
321
  code = client.generate_code(args.prompt, language=args.language)
320
322
  print(prettify_markdown(f"```{args.language}\n{code}\n```"))
323
+ else:
324
+ parser.print_help()
325
+ sys.exit(1)
321
326
  ```
322
327
 
323
328
  This allows you to build specialized AI tools like:
@@ -347,7 +352,7 @@ You can configure the client using the following options:
347
352
  | `--top_p` | Set top_p (controls diversity, default: 1.0) |
348
353
  | `--max_tokens` | Set maximum response length in tokens |
349
354
  | `--preprompt` | Set custom system prompt to control AI behavior |
350
- | `--log` | Set filepath to log conversation to (for interactive modes) |
355
+ | `--log` | Enable logging: use `--log` to create a temporary log file, or `--log PATH` for a specific location |
351
356
  | `--prettify` | Render markdown responses and code with syntax highlighting |
352
357
  | `--stream-prettify` | Enable real-time markdown rendering with syntax highlighting while streaming |
353
358
  | `--renderer` | Select which markdown renderer to use with --prettify (auto, rich, or glow) |
@@ -79,6 +79,9 @@ ngpt --preprompt "You are a Linux expert" "How do I find large files?"
79
79
 
80
80
  # Log your conversation to a file
81
81
  ngpt --interactive --log conversation.log
82
+
83
+ # Create a temporary log file automatically
84
+ ngpt --log "Tell me about quantum computing"
82
85
  ```
83
86
 
84
87
  For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
@@ -100,6 +103,7 @@ For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdri
100
103
  - 🎭 **System Prompts**: Customize model behavior with custom system prompts
101
104
  - 📃 **Conversation Logging**: Save your conversations to text files for later reference
102
105
  - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
106
+ - 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
103
107
 
104
108
  See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
105
109
 
@@ -257,16 +261,14 @@ nGPT can also be used as a framework to build your own AI-powered command-line t
257
261
 
258
262
  ```python
259
263
  from ngpt import NGPTClient, load_config
260
- from ngpt.cli.main import interactive_chat_session
264
+ from ngpt.cli.interactive import interactive_chat_session
261
265
  from ngpt.cli.renderers import prettify_markdown
262
- from ngpt.cli.formatters import ColoredHelpFormatter
263
- import argparse
266
+ from ngpt.cli.args import setup_argument_parser
267
+ import sys
264
268
 
265
269
  # Create a custom CLI tool with colorized help
266
- parser = argparse.ArgumentParser(
267
- description="Specialized Code Assistant",
268
- formatter_class=ColoredHelpFormatter
269
- )
270
+ parser = setup_argument_parser()
271
+ parser.description = "Specialized Code Assistant"
270
272
  parser.add_argument("prompt", nargs="?", help="Code description")
271
273
  parser.add_argument("--language", "-l", default="python", help="Programming language")
272
274
  parser.add_argument("--interactive", "-i", action="store_true", help="Start interactive mode")
@@ -283,6 +285,9 @@ elif args.prompt:
283
285
  # Generate and prettify code
284
286
  code = client.generate_code(args.prompt, language=args.language)
285
287
  print(prettify_markdown(f"```{args.language}\n{code}\n```"))
288
+ else:
289
+ parser.print_help()
290
+ sys.exit(1)
286
291
  ```
287
292
 
288
293
  This allows you to build specialized AI tools like:
@@ -312,7 +317,7 @@ You can configure the client using the following options:
312
317
  | `--top_p` | Set top_p (controls diversity, default: 1.0) |
313
318
  | `--max_tokens` | Set maximum response length in tokens |
314
319
  | `--preprompt` | Set custom system prompt to control AI behavior |
315
- | `--log` | Set filepath to log conversation to (for interactive modes) |
320
+ | `--log` | Enable logging: use `--log` to create a temporary log file, or `--log PATH` for a specific location |
316
321
  | `--prettify` | Render markdown responses and code with syntax highlighting |
317
322
  | `--stream-prettify` | Enable real-time markdown rendering with syntax highlighting while streaming |
318
323
  | `--renderer` | Select which markdown renderer to use with --prettify (auto, rich, or glow) |
@@ -18,7 +18,7 @@ def interactive_chat_session(
18
18
  temperature=0.7,
19
19
  top_p=1.0,
20
20
  max_tokens=None,
21
- log_file=None,
21
+ logger=None,
22
22
  preprompt=None,
23
23
  prettify=False,
24
24
  renderer='auto',
@@ -35,7 +35,7 @@ Creates an interactive chat session with the specified AI client.
35
35
  - `temperature` (float): Temperature for generation (0.0-1.0)
36
36
  - `top_p` (float): Top-p sampling value (0.0-1.0)
37
37
  - `max_tokens` (int, optional): Maximum number of tokens to generate
38
- - `log_file` (str, optional): Path to file for logging the conversation
38
+ - `logger` (object, optional): A logger instance with `log(role, message)` and `get_log_path()` methods for logging the conversation.
39
39
  - `preprompt` (str, optional): System prompt to use for the chat
40
40
  - `prettify` (bool): Whether to prettify markdown in responses
41
41
  - `renderer` (str): Markdown renderer to use ('auto', 'rich', 'glow')
@@ -131,7 +131,7 @@ You can also set configuration options directly via command-line arguments:
131
131
  - `--stream-prettify`: Enable real-time formatted output while streaming (uses Rich).
132
132
  - `--web-search`: Enable web search capability (if supported by the API).
133
133
  - `--preprompt <text>`: Set a custom system prompt.
134
- - `--log <file>`: Log the conversation to a file (in interactive modes).
134
+ - `--log [file]`: Enable logging: use `--log` to create a temporary log file, or `--log PATH` for a specific location.
135
135
  - `--temperature <value>`: Set the generation temperature (0.0-2.0).
136
136
  - `--top_p <value>`: Set the nucleus sampling top_p value (0.0-1.0).
137
137
  - `--max_tokens <number>`: Set the maximum number of tokens for the response.
@@ -157,8 +157,11 @@ ngpt --temperature 0.8 --top_p 0.95 --max_tokens 300 "Write a creative story"
157
157
  # Set a custom system prompt (preprompt)
158
158
  ngpt --preprompt "You are a Linux command line expert. Focus on efficient solutions." "How do I find the largest files in a directory?"
159
159
 
160
- # Save conversation to a log file (for interactive modes)
160
+ # Log conversation to a specific file
161
161
  ngpt --interactive --log conversation.log
162
+
163
+ # Create a temporary log file automatically
164
+ ngpt --log "Tell me about quantum computing"
162
165
  ```
163
166
 
164
167
  ## Environment Variables
@@ -81,9 +81,12 @@ ngpt --interactive --preprompt "You are a Python programming tutor. Explain conc
81
81
  Save your conversation history to a file for reference:
82
82
 
83
83
  ```bash
84
- # Basic interactive session with logging
84
+ # Basic interactive session with logging to a specific file
85
85
  ngpt --interactive --log python_tutoring.log
86
86
 
87
+ # Create an automatic temporary log file
88
+ ngpt --interactive --log
89
+
87
90
  # Combine logging with custom system prompt
88
91
  ngpt --interactive \
89
92
  --preprompt "You are a data science expert helping analyze experimental results." \
@@ -93,6 +96,9 @@ ngpt --interactive \
93
96
  ngpt --interactive \
94
97
  --preprompt "You are helping plan the architecture for a microservices application." \
95
98
  --log architecture_planning.log
99
+
100
+ # Log non-interactive sessions
101
+ ngpt --log "Explain quantum computing"
96
102
  ```
97
103
 
98
104
  The log file contains the complete conversation transcript, including:
@@ -18,19 +18,17 @@ Here's a simple CLI tool that uses nGPT to generate and explain code:
18
18
 
19
19
  ```python
20
20
  #!/usr/bin/env python3
21
- import argparse
22
21
  import sys
23
22
  from ngpt import NGPTClient, load_config
24
- from ngpt.cli.formatters import ColoredHelpFormatter
23
+ from ngpt.cli.args import setup_argument_parser, validate_args
25
24
  from ngpt.cli.renderers import prettify_markdown, has_markdown_renderer
26
25
 
27
26
  def main():
28
27
  # Create parser with colorized help
29
- parser = argparse.ArgumentParser(
30
- description="Simple code generation tool",
31
- formatter_class=ColoredHelpFormatter
32
- )
28
+ parser = setup_argument_parser()
33
29
 
30
+ # Customize the parser for our specific needs
31
+ parser.description = "Simple code generation tool"
34
32
  parser.add_argument("prompt", help="Code description")
35
33
  parser.add_argument("--language", "-l", default="python", help="Programming language")
36
34
  parser.add_argument("--explain", "-e", action="store_true", help="Include explanation")
@@ -28,6 +28,11 @@ nGPT is a lightweight Python library and command-line interface (CLI) tool desig
28
28
 
29
29
  - **Clean Code Generation**: Output code without markdown formatting or explanations.
30
30
 
31
+ - 📝 **Rich Multiline Editor**: Interactive multiline text input with syntax highlighting and intuitive controls
32
+ - 🎭 **System Prompts**: Customize model behavior with custom system prompts
33
+ - 📃 **Conversation Logging**: Automatically or manually log conversations with timestamps and role information
34
+ - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
35
+
31
36
  ## Architecture
32
37
 
33
38
  nGPT is built around a few core components:
@@ -10,6 +10,7 @@ nGPT's CLI module has been modularized into several components that you can inco
10
10
  - **Markdown Rendering**: Beautiful formatting for markdown with syntax highlighting (`ngpt.cli.renderers`)
11
11
  - **Real-time Streaming**: Tools for handling streaming content with live updates (`ngpt.cli.ui`)
12
12
  - **CLI Configuration System**: Robust configuration management (`ngpt.cli.main`)
13
+ - **Argument Parsing**: Sophisticated argument parsing and validation (`ngpt.cli.args`)
13
14
  - **Terminal Utilities**: Helpers for colorized output and terminal formatting (`ngpt.cli.formatters`)
14
15
  - **Mode-specific functionality**: Specialized code, shell, chat and text mode handlers (`ngpt.cli.modes`)
15
16
 
@@ -28,6 +29,40 @@ This will install nGPT with all required dependencies, including:
28
29
 
29
30
  ## Available Components
30
31
 
32
+ ### Argument Parsing
33
+
34
+ The `args` module provides utilities for building colorful, sophisticated command-line interfaces:
35
+
36
+ ```python
37
+ from ngpt.cli.args import setup_argument_parser, validate_args, validate_markdown_renderer
38
+
39
+ # Create and configure the parser
40
+ parser = setup_argument_parser()
41
+ args = parser.parse_args()
42
+
43
+ # Validate arguments for correctness and compatibility
44
+ try:
45
+ args = validate_args(args)
46
+ except ValueError as e:
47
+ print(f"Error: {e}")
48
+ sys.exit(1)
49
+
50
+ # Check if markdown renderer is available
51
+ has_renderer, args = validate_markdown_renderer(args)
52
+ if not has_renderer:
53
+ print("Warning: No markdown renderer available. Using plain text.")
54
+ ```
55
+
56
+ The argument parsing module provides these key functions:
57
+
58
+ - `setup_argument_parser()`: Creates a fully configured argument parser with rich formatting
59
+ - `parse_args()`: Parses command-line arguments
60
+ - `validate_args(args)`: Validates parsed arguments for correctness and compatibility
61
+ - `validate_markdown_renderer(args)`: Checks if markdown rendering is available
62
+ - `handle_cli_config_args(args)`: Processes CLI configuration commands
63
+
64
+ This modular approach makes it easy to create sophisticated CLI tools with consistent behavior.
65
+
31
66
  ### Interactive Chat Session
32
67
 
33
68
  The `interactive_chat_session` function provides a complete interactive chat experience:
@@ -90,7 +90,7 @@ Below is a comprehensive list of all available command-line options, organized b
90
90
 
91
91
  | Option | Description |
92
92
  |--------|-------------|
93
- | `--log <file>` | Set filepath to log conversation to (for interactive modes) |
93
+ | `--log [file]` | Enable logging: use `--log` to create a temporary log file, or `--log PATH` for a specific location |
94
94
  | `-v, --version` | Show version information and exit |
95
95
  | `-h, --help` | Show help message and exit |
96
96
 
@@ -120,13 +120,36 @@ This opens a continuous chat session where the AI remembers previous exchanges.
120
120
  - Use arrow keys to navigate message history
121
121
  - Press Ctrl+C to exit the session
122
122
 
123
- You can log your conversation to a file for later reference:
123
+ #### Conversation Logging
124
+
125
+ You can log your conversation in several ways:
124
126
 
125
127
  ```bash
128
+ # Log to a specific file
126
129
  ngpt -i --log conversation.log
130
+
131
+ # Automatically create a temporary log file
132
+ ngpt -i --log
127
133
  ```
128
134
 
129
- This saves the entire conversation, including both user inputs and AI responses, to the specified file.
135
+ When using `--log` without a path, nGPT creates a temporary log file with a timestamp in the name:
136
+ - On Linux/macOS: `/tmp/ngpt-YYYYMMDD-HHMMSS.log`
137
+ - On Windows: `%TEMP%\ngpt-YYYYMMDD-HHMMSS.log`
138
+
139
+ The log file contains timestamps, roles, and the full content of all messages exchanged, making it easy to reference conversations later.
140
+
141
+ Logging works in all modes (not just interactive):
142
+
143
+ ```bash
144
+ # Log in standard chat mode
145
+ ngpt --log "Tell me about quantum computing"
146
+
147
+ # Log in code generation mode
148
+ ngpt --code --log "function to calculate prime numbers"
149
+
150
+ # Log in shell command mode
151
+ ngpt --shell --log "find large files in current directory"
152
+ ```
130
153
 
131
154
  #### Combining with Other Options
132
155
 
@@ -0,0 +1,161 @@
1
+ import argparse
2
+ import sys
3
+ from .. import __version__
4
+ from .formatters import COLORS, ColoredHelpFormatter
5
+ from .renderers import has_markdown_renderer, warn_if_no_markdown_renderer
6
+
7
+ def setup_argument_parser():
8
+ """Set up and return a fully configured argument parser for nGPT CLI."""
9
+ # Colorize description - use a shorter description to avoid line wrapping issues
10
+ description = f"{COLORS['cyan']}{COLORS['bold']}nGPT{COLORS['reset']} - Interact with AI language models via OpenAI-compatible APIs"
11
+
12
+ # Minimalist, clean epilog design
13
+ epilog = f"\n{COLORS['yellow']}nGPT {COLORS['bold']}v{__version__}{COLORS['reset']} • {COLORS['green']}Docs: {COLORS['bold']}https://nazdridoy.github.io/ngpt/usage/cli_usage.html{COLORS['reset']}"
14
+
15
+ parser = argparse.ArgumentParser(description=description, formatter_class=ColoredHelpFormatter, epilog=epilog)
16
+
17
+ # Add custom error method with color
18
+ original_error = parser.error
19
+ def error_with_color(message):
20
+ parser.print_usage(sys.stderr)
21
+ parser.exit(2, f"{COLORS['bold']}{COLORS['yellow']}error: {COLORS['reset']}{message}\n")
22
+ parser.error = error_with_color
23
+
24
+ # Custom version action with color
25
+ class ColoredVersionAction(argparse.Action):
26
+ def __call__(self, parser, namespace, values, option_string=None):
27
+ print(f"{COLORS['green']}{COLORS['bold']}nGPT{COLORS['reset']} version {COLORS['yellow']}{__version__}{COLORS['reset']}")
28
+ parser.exit()
29
+
30
+ # Version flag
31
+ parser.add_argument('-v', '--version', action=ColoredVersionAction, nargs=0, help='Show version information and exit')
32
+
33
+ # Config options
34
+ config_group = parser.add_argument_group('Configuration Options')
35
+ config_group.add_argument('--config', nargs='?', const=True, help='Path to a custom config file or, if no value provided, enter interactive configuration mode to create a new config')
36
+ config_group.add_argument('--config-index', type=int, default=0, help='Index of the configuration to use or edit (default: 0)')
37
+ config_group.add_argument('--provider', help='Provider name to identify the configuration to use')
38
+ config_group.add_argument('--remove', action='store_true', help='Remove the configuration at the specified index (requires --config and --config-index)')
39
+ config_group.add_argument('--show-config', action='store_true', help='Show the current configuration(s) and exit')
40
+ config_group.add_argument('--all', action='store_true', help='Show details for all configurations (requires --show-config)')
41
+ config_group.add_argument('--list-models', action='store_true', help='List all available models for the current configuration and exit')
42
+ config_group.add_argument('--list-renderers', action='store_true', help='Show available markdown renderers for use with --prettify')
43
+
44
+ # Global options
45
+ global_group = parser.add_argument_group('Global Options')
46
+ global_group.add_argument('--api-key', help='API key for the service')
47
+ global_group.add_argument('--base-url', help='Base URL for the API')
48
+ global_group.add_argument('--model', help='Model to use')
49
+ global_group.add_argument('--web-search', action='store_true',
50
+ help='Enable web search capability (Note: Your API endpoint must support this feature)')
51
+ global_group.add_argument('-n', '--no-stream', action='store_true',
52
+ help='Return the whole response without streaming')
53
+ global_group.add_argument('--temperature', type=float, default=0.7,
54
+ help='Set temperature (controls randomness, default: 0.7)')
55
+ global_group.add_argument('--top_p', type=float, default=1.0,
56
+ help='Set top_p (controls diversity, default: 1.0)')
57
+ global_group.add_argument('--max_tokens', type=int,
58
+ help='Set max response length in tokens')
59
+ global_group.add_argument('--log', metavar='FILE', nargs='?', const=True,
60
+ help='Set filepath to log conversation to, or create a temporary log file if no path provided')
61
+ global_group.add_argument('--preprompt',
62
+ help='Set custom system prompt to control AI behavior')
63
+ global_group.add_argument('--prettify', action='store_const', const='auto',
64
+ help='Render markdown responses and code with syntax highlighting and formatting')
65
+ global_group.add_argument('--stream-prettify', action='store_true',
66
+ help='Enable streaming with markdown rendering (automatically uses Rich renderer)')
67
+ global_group.add_argument('--renderer', choices=['auto', 'rich', 'glow'], default='auto',
68
+ help='Select which markdown renderer to use with --prettify (auto, rich, or glow)')
69
+
70
+ # Mode flags (mutually exclusive)
71
+ mode_group = parser.add_argument_group('Modes (mutually exclusive)')
72
+ mode_exclusive_group = mode_group.add_mutually_exclusive_group()
73
+ mode_exclusive_group.add_argument('-i', '--interactive', action='store_true', help='Start an interactive chat session')
74
+ mode_exclusive_group.add_argument('-s', '--shell', action='store_true', help='Generate and execute shell commands')
75
+ mode_exclusive_group.add_argument('-c', '--code', action='store_true', help='Generate code')
76
+ mode_exclusive_group.add_argument('-t', '--text', action='store_true', help='Enter multi-line text input (submit with Ctrl+D)')
77
+ # Note: --show-config is handled separately and implicitly acts as a mode
78
+
79
+ # Language option for code mode
80
+ parser.add_argument('--language', default="python", help='Programming language to generate code in (for code mode)')
81
+
82
+ # Prompt argument
83
+ parser.add_argument('prompt', nargs='?', default=None, help='The prompt to send')
84
+
85
+ # Add CLI configuration command
86
+ config_group.add_argument('--cli-config', nargs='*', metavar='COMMAND',
87
+ help='Manage CLI configuration (set, get, unset, list)')
88
+
89
+ return parser
90
+
91
+ def parse_args():
92
+ """Parse command line arguments using the configured parser."""
93
+ parser = setup_argument_parser()
94
+ return parser.parse_args()
95
+
96
+ def validate_args(args):
97
+ """Validate parsed arguments for correctness and compatibility."""
98
+ # Validate --all usage
99
+ if args.all and not args.show_config:
100
+ raise ValueError("--all can only be used with --show-config")
101
+
102
+ # Check if --prettify is used with --stream-prettify (conflict)
103
+ if args.prettify and args.stream_prettify:
104
+ raise ValueError("--prettify and --stream-prettify cannot be used together. Choose one option.")
105
+
106
+ # Check if --stream-prettify is used but Rich is not available
107
+ if args.stream_prettify and not has_markdown_renderer('rich'):
108
+ raise ValueError("--stream-prettify requires Rich to be installed. Install with: pip install \"ngpt[full]\" or pip install rich")
109
+
110
+ return args
111
+
112
+ def validate_markdown_renderer(args):
113
+ """Validate that required markdown renderers are available.
114
+
115
+ Args:
116
+ args: The parsed command line arguments.
117
+
118
+ Returns:
119
+ tuple: (has_renderer, args)
120
+ - has_renderer: Boolean indicating if a renderer is available
121
+ - args: Potentially modified args with prettify disabled if no renderer is available
122
+ """
123
+ has_renderer = True
124
+ if args.prettify:
125
+ has_renderer = warn_if_no_markdown_renderer(args.renderer)
126
+ if not has_renderer:
127
+ # Set a flag to disable prettify since we already warned the user
128
+ print(f"{COLORS['yellow']}Continuing without markdown rendering.{COLORS['reset']}")
129
+ args.prettify = False
130
+
131
+ return has_renderer, args
132
+
133
+ def handle_cli_config_args(args):
134
+ """Process CLI configuration arguments and determine command parameters.
135
+
136
+ Args:
137
+ args: The parsed command line arguments.
138
+
139
+ Returns:
140
+ tuple: (should_handle, action, option, value)
141
+ - should_handle: True if --cli-config was specified and should be handled
142
+ - action: The action to perform (set, get, unset, list, help)
143
+ - option: The option name (or None)
144
+ - value: The option value (or None)
145
+ """
146
+ if args.cli_config is None:
147
+ return (False, None, None, None)
148
+
149
+ # Show help if no arguments or "help" argument
150
+ if len(args.cli_config) == 0 or (len(args.cli_config) > 0 and args.cli_config[0].lower() == "help"):
151
+ return (True, "help", None, None)
152
+
153
+ action = args.cli_config[0].lower()
154
+ option = args.cli_config[1] if len(args.cli_config) > 1 else None
155
+ value = args.cli_config[2] if len(args.cli_config) > 2 else None
156
+
157
+ if action in ("set", "get", "unset", "list", "help"):
158
+ return (True, action, option, value)
159
+ else:
160
+ # Unknown action, show help
161
+ return (True, "help", None, None)
@@ -1,9 +1,11 @@
1
1
  import sys
2
+ import os
2
3
  import shutil
3
4
  import datetime
4
5
  import traceback
5
6
  from .formatters import COLORS
6
7
  from .renderers import prettify_markdown, prettify_streaming_markdown
8
+ from ..log import create_logger
7
9
 
8
10
  # Optional imports for enhanced UI
9
11
  try:
@@ -16,7 +18,7 @@ try:
16
18
  except ImportError:
17
19
  HAS_PROMPT_TOOLKIT = False
18
20
 
19
- def interactive_chat_session(client, web_search=False, no_stream=False, temperature=0.7, top_p=1.0, max_tokens=None, log_file=None, preprompt=None, prettify=False, renderer='auto', stream_prettify=False):
21
+ def interactive_chat_session(client, web_search=False, no_stream=False, temperature=0.7, top_p=1.0, max_tokens=None, preprompt=None, prettify=False, renderer='auto', stream_prettify=False, logger=None):
20
22
  """Start an interactive chat session with the AI.
21
23
 
22
24
  Args:
@@ -26,11 +28,11 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
26
28
  temperature: Controls randomness in the response
27
29
  top_p: Controls diversity via nucleus sampling
28
30
  max_tokens: Maximum number of tokens to generate in each response
29
- log_file: Optional filepath to log conversation to
30
31
  preprompt: Custom system prompt to control AI behavior
31
32
  prettify: Whether to enable markdown rendering
32
33
  renderer: Which markdown renderer to use
33
34
  stream_prettify: Whether to enable streaming with prettify
35
+ logger: Logger instance for logging the conversation
34
36
  """
35
37
  # Get terminal width for better formatting
36
38
  try:
@@ -58,18 +60,9 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
58
60
 
59
61
  print(f"\n{separator}\n")
60
62
 
61
- # Initialize log file if provided
62
- log_handle = None
63
- if log_file:
64
- try:
65
- timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
66
- log_handle = open(log_file, 'a', encoding='utf-8')
67
- log_handle.write(f"\n--- nGPT Session Log: {sys.argv} ---\n")
68
- log_handle.write(f"Started at: {timestamp}\n\n")
69
- print(f"{COLORS['green']}Logging conversation to: {log_file}{COLORS['reset']}")
70
- except Exception as e:
71
- print(f"{COLORS['yellow']}Warning: Could not open log file: {str(e)}{COLORS['reset']}")
72
- log_handle = None
63
+ # Show logging info if logger is available
64
+ if logger:
65
+ print(f"{COLORS['green']}Logging conversation to: {logger.get_log_path()}{COLORS['reset']}")
73
66
 
74
67
  # Custom separator - use the same length for consistency
75
68
  def print_separator():
@@ -90,9 +83,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
90
83
  conversation.append(system_message)
91
84
 
92
85
  # Log system prompt if logging is enabled
93
- if log_handle and preprompt:
94
- log_handle.write(f"System: {system_prompt}\n\n")
95
- log_handle.flush()
86
+ if logger and preprompt:
87
+ logger.log("system", system_prompt)
96
88
 
97
89
  # Initialize prompt_toolkit history
98
90
  prompt_history = InMemoryHistory() if HAS_PROMPT_TOOLKIT else None
@@ -187,9 +179,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
187
179
  conversation.append(user_message)
188
180
 
189
181
  # Log user message if logging is enabled
190
- if log_handle:
191
- log_handle.write(f"User: {user_input}\n")
192
- log_handle.flush()
182
+ if logger:
183
+ logger.log("user", user_input)
193
184
 
194
185
  # Print assistant indicator with formatting
195
186
  if not no_stream and not stream_prettify:
@@ -254,22 +245,16 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
254
245
  else:
255
246
  print(response)
256
247
 
257
- # Log assistant response if logging is enabled
258
- if log_handle:
259
- log_handle.write(f"Assistant: {response}\n\n")
260
- log_handle.flush()
248
+ # Log AI response if logging is enabled
249
+ if logger:
250
+ logger.log("assistant", response)
261
251
 
262
252
  # Print separator between exchanges
263
253
  print_separator()
264
254
 
265
255
  except KeyboardInterrupt:
266
- print(f"\n\n{COLORS['green']}Chat session ended by user. Goodbye!{COLORS['reset']}")
256
+ print(f"\n\n{COLORS['yellow']}Chat session interrupted by user.{COLORS['reset']}")
267
257
  except Exception as e:
268
- print(f"\n{COLORS['yellow']}Error during chat session: {str(e)}{COLORS['reset']}")
269
- # Print traceback for debugging if it's a serious error
270
- traceback.print_exc()
271
- finally:
272
- # Close log file if it was opened
273
- if log_handle:
274
- log_handle.write(f"\n--- End of Session ---\n")
275
- log_handle.close()
258
+ print(f"\n{COLORS['yellow']}Error in chat session: {str(e)}{COLORS['reset']}")
259
+ if os.environ.get("NGPT_DEBUG"):
260
+ traceback.print_exc()