ngpt 2.16.7__py3-none-any.whl → 3.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ngpt/cli/args.py CHANGED
@@ -90,9 +90,7 @@ def setup_argument_parser():
90
90
 
91
91
  # GitCommit message options
92
92
  gitcommsg_group = parser.add_argument_group('Git Commit Message Options')
93
- gitcommsg_group.add_argument('-m', '--message-context',
94
- help='Context to guide AI generation (e.g., file types, commit type)')
95
- gitcommsg_group.add_argument('-r', '--recursive-chunk', action='store_true',
93
+ gitcommsg_group.add_argument('--rec-chunk', action='store_true',
96
94
  help='Process large diffs in chunks with recursive analysis if needed')
97
95
  gitcommsg_group.add_argument('--diff', metavar='FILE', nargs='?', const=True,
98
96
  help='Use diff from specified file instead of staged changes. If used without a path, uses the path from CLI config.')
@@ -116,11 +114,11 @@ def setup_argument_parser():
116
114
  help='Generate code')
117
115
  mode_exclusive_group.add_argument('-t', '--text', action='store_true',
118
116
  help='Enter multi-line text input (submit with Ctrl+D)')
119
- mode_exclusive_group.add_argument('--stdin', action='store_true',
117
+ mode_exclusive_group.add_argument('-p', '--pipe', action='store_true',
120
118
  help='Read from stdin and use content with prompt. Use {} in prompt as placeholder for stdin content')
121
- mode_exclusive_group.add_argument('--rewrite', action='store_true',
119
+ mode_exclusive_group.add_argument('-r', '--rewrite', action='store_true',
122
120
  help='Rewrite text from stdin to be more natural while preserving tone and meaning')
123
- mode_exclusive_group.add_argument('--gitcommsg', action='store_true',
121
+ mode_exclusive_group.add_argument('-g', '--gitcommsg', action='store_true',
124
122
  help='Generate AI-powered git commit messages from staged changes or diff file')
125
123
 
126
124
  return parser
@@ -145,8 +143,8 @@ def validate_args(args):
145
143
  raise ValueError("--stream-prettify requires Rich to be installed. Install with: pip install \"ngpt[full]\" or pip install rich")
146
144
 
147
145
  # If stdin mode is used, check if input is available
148
- if args.stdin and sys.stdin.isatty():
149
- raise ValueError("--stdin was specified but no input is piped. Use echo 'content' | ngpt --stdin 'prompt with {}'")
146
+ if args.pipe and sys.stdin.isatty():
147
+ raise ValueError("--pipe was specified but no input is piped. Use echo 'content' | ngpt --pipe 'prompt with {}'")
150
148
 
151
149
  return args
152
150
 
ngpt/cli/main.py CHANGED
@@ -550,11 +550,11 @@ def main():
550
550
  # Text mode (multiline input)
551
551
  text_mode(client, args, logger=logger)
552
552
 
553
- elif args.stdin:
554
- # Apply CLI config for stdin mode (similar to chat mode)
553
+ elif args.pipe:
554
+ # Apply CLI config for pipe mode (similar to chat mode)
555
555
  args = apply_cli_config(args, "all")
556
556
 
557
- # Stdin mode (using the chat mode with stdin input)
557
+ # Pipe mode (using the chat mode with stdin input)
558
558
  chat_mode(client, args, logger=logger)
559
559
 
560
560
  elif args.rewrite:
ngpt/cli/modes/chat.py CHANGED
@@ -12,8 +12,8 @@ def chat_mode(client, args, logger=None):
12
12
  args: The parsed command-line arguments
13
13
  logger: Optional logger instance
14
14
  """
15
- # Handle stdin mode
16
- if args.stdin:
15
+ # Handle pipe mode
16
+ if args.pipe:
17
17
  # Read input from stdin
18
18
  stdin_content = sys.stdin.read().strip()
19
19
 
@@ -70,11 +70,11 @@ def split_into_chunks(content, chunk_size=200):
70
70
 
71
71
  return chunks
72
72
 
73
- def create_technical_analysis_system_prompt(context=None):
74
- """Create system prompt for technical analysis based on context data.
73
+ def create_technical_analysis_system_prompt(preprompt=None):
74
+ """Create system prompt for technical analysis based on preprompt data.
75
75
 
76
76
  Args:
77
- context: The raw context string from -m flag
77
+ preprompt: The raw preprompt string from --preprompt flag
78
78
 
79
79
  Returns:
80
80
  str: System prompt for the technical analysis stage
@@ -102,25 +102,25 @@ RULES:
102
102
  8. When analyzing multiple files, clearly separate each file's changes
103
103
  9. Include proper technical details (method names, component identifiers, etc.)"""
104
104
 
105
- # If context is provided, append it with strong wording about absolute priority
106
- if context:
107
- context_prompt = f"""
105
+ # If preprompt is provided, prepend it to the base prompt with strong wording about absolute priority
106
+ if preprompt:
107
+ preprompt_section = f"""===CRITICAL USER PREPROMPT - ABSOLUTE HIGHEST PRIORITY===
108
+ The following preprompt from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
108
109
 
109
- ===CRITICAL USER CONTEXT - ABSOLUTE HIGHEST PRIORITY===
110
- The following context from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
110
+ {preprompt}
111
111
 
112
- {context}
112
+ THIS USER PREPROMPT HAS ABSOLUTE PRIORITY over any other instructions that follow. If it contradicts other instructions, the user preprompt MUST be followed. No exceptions.
113
113
 
114
- THIS USER CONTEXT HAS ABSOLUTE PRIORITY over any other instructions in this prompt. If it contradicts other instructions, the user context MUST be followed. No exceptions."""
115
- base_prompt += context_prompt
114
+ """
115
+ return preprompt_section + base_prompt
116
116
 
117
117
  return base_prompt
118
118
 
119
- def create_system_prompt(context=None):
120
- """Create system prompt for commit message generation based on context data.
119
+ def create_system_prompt(preprompt=None):
120
+ """Create system prompt for commit message generation based on preprompt data.
121
121
 
122
122
  Args:
123
- context: The raw context string from -m flag
123
+ preprompt: The raw preprompt string from --preprompt flag
124
124
 
125
125
  Returns:
126
126
  str: System prompt for the AI
@@ -188,11 +188,11 @@ refactor(core): simplify data processing pipeline
188
188
  - [test] Update tests for new pipeline structure (tests/pipeline.test.js)
189
189
 
190
190
  4. Multiple changes to the same file:
191
- refactor(core): simplify context handling for commit prompts
191
+ refactor(core): simplify preprompt handling for commit prompts
192
192
 
193
- - [refactor] Remove process_context function (cli/modes/gitcommsg.py:69-124)
194
- - [refactor] Update all functions to accept raw context string (gitcommsg.py:create_system_prompt())
195
- - [refactor] Replace context_data usages with context (gitcommsg.py)
193
+ - [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
194
+ - [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
195
+ - [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
196
196
  - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
197
197
  - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
198
198
 
@@ -216,17 +216,17 @@ RULES:
216
216
  10. Include proper technical details (method names, component identifiers, etc.)
217
217
  11. When all changes are to the same file, mention it once in the summary"""
218
218
 
219
- # If context is provided, append it with strong wording about absolute priority
220
- if context:
221
- context_prompt = f"""
219
+ # If preprompt is provided, prepend it with strong wording about absolute priority
220
+ if preprompt:
221
+ preprompt_section = f"""===CRITICAL USER PREPROMPT - ABSOLUTE HIGHEST PRIORITY===
222
+ The following preprompt from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
222
223
 
223
- ===CRITICAL USER CONTEXT - ABSOLUTE HIGHEST PRIORITY===
224
- The following context from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
224
+ {preprompt}
225
225
 
226
- {context}
226
+ THIS USER PREPROMPT HAS ABSOLUTE PRIORITY over any other instructions that follow. If it contradicts other instructions, the user preprompt MUST be followed. No exceptions.
227
227
 
228
- THIS USER CONTEXT HAS ABSOLUTE PRIORITY over any other instructions in this prompt. If it contradicts other instructions, the user context MUST be followed. No exceptions."""
229
- base_prompt += context_prompt
228
+ """
229
+ return preprompt_section + base_prompt
230
230
 
231
231
  return base_prompt
232
232
 
@@ -346,11 +346,11 @@ refactor(core): simplify data processing pipeline
346
346
  - [test] Update tests for new pipeline structure (tests/pipeline.test.js)
347
347
 
348
348
  4. Multiple changes to the same file:
349
- refactor(core): simplify context handling for commit prompts
349
+ refactor(core): simplify preprompt handling for commit prompts
350
350
 
351
- - [refactor] Remove process_context function (cli/modes/gitcommsg.py:69-124)
352
- - [refactor] Update all functions to accept raw context string (gitcommsg.py:create_system_prompt())
353
- - [refactor] Replace context_data usages with context (gitcommsg.py)
351
+ - [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
352
+ - [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
353
+ - [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
354
354
  - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
355
355
  - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
356
356
 
@@ -444,13 +444,13 @@ def handle_api_call(client, prompt, system_prompt=None, logger=None, max_retries
444
444
  # Exponential backoff
445
445
  wait_seconds *= 2
446
446
 
447
- def process_with_chunking(client, diff_content, context, chunk_size=200, recursive=False, logger=None, max_msg_lines=20, max_recursion_depth=3, analyses_chunk_size=None):
447
+ def process_with_chunking(client, diff_content, preprompt, chunk_size=200, recursive=False, logger=None, max_msg_lines=20, max_recursion_depth=3, analyses_chunk_size=None):
448
448
  """Process diff with chunking to handle large diffs.
449
449
 
450
450
  Args:
451
451
  client: The NGPTClient instance
452
452
  diff_content: The diff content to process
453
- context: The raw context string
453
+ preprompt: The raw preprompt string
454
454
  chunk_size: Maximum number of lines per chunk
455
455
  recursive: Whether to use recursive chunking
456
456
  logger: Optional logger instance
@@ -466,8 +466,8 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
466
466
  analyses_chunk_size = chunk_size
467
467
 
468
468
  # Create different system prompts for different stages
469
- technical_system_prompt = create_technical_analysis_system_prompt(context)
470
- commit_system_prompt = create_system_prompt(context)
469
+ technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
470
+ commit_system_prompt = create_system_prompt(preprompt)
471
471
 
472
472
  # Log initial diff content
473
473
  if logger:
@@ -547,7 +547,7 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
547
547
  return recursive_chunk_analysis(
548
548
  client,
549
549
  combined_analyses,
550
- context,
550
+ preprompt,
551
551
  analyses_chunk_size,
552
552
  logger,
553
553
  max_msg_lines,
@@ -601,13 +601,13 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
601
601
  logger.error(f"Error combining analyses: {str(e)}")
602
602
  return None
603
603
 
604
- def recursive_chunk_analysis(client, combined_analysis, context, chunk_size, logger=None, max_msg_lines=20, max_recursion_depth=3, current_depth=1):
604
+ def recursive_chunk_analysis(client, combined_analysis, preprompt, chunk_size, logger=None, max_msg_lines=20, max_recursion_depth=3, current_depth=1):
605
605
  """Recursively chunk and process large analysis results until they're small enough.
606
606
 
607
607
  Args:
608
608
  client: The NGPTClient instance
609
609
  combined_analysis: The combined analysis to process
610
- context: The raw context string
610
+ preprompt: The raw preprompt string
611
611
  chunk_size: Maximum number of lines per chunk
612
612
  logger: Optional logger instance
613
613
  max_msg_lines: Maximum number of lines in commit message before condensing
@@ -618,8 +618,8 @@ def recursive_chunk_analysis(client, combined_analysis, context, chunk_size, log
618
618
  str: Generated commit message
619
619
  """
620
620
  # Create different system prompts for different stages
621
- technical_system_prompt = create_technical_analysis_system_prompt(context)
622
- commit_system_prompt = create_system_prompt(context)
621
+ technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
622
+ commit_system_prompt = create_system_prompt(preprompt)
623
623
 
624
624
  print(f"\n{COLORS['cyan']}Recursive analysis chunking level {current_depth}...{COLORS['reset']}")
625
625
 
@@ -767,7 +767,7 @@ SECTION OF ANALYSIS TO CONDENSE:
767
767
  return recursive_chunk_analysis(
768
768
  client,
769
769
  combined_condensed,
770
- context,
770
+ preprompt,
771
771
  chunk_size,
772
772
  logger,
773
773
  max_msg_lines,
@@ -948,11 +948,11 @@ BULLET POINT FORMAT:
948
948
  - Not: "- feat: Add new login component" (incorrect)
949
949
 
950
950
  EXAMPLE OF PROPERLY FORMATTED COMMIT MESSAGE:
951
- refactor(core): simplify context handling for commit prompts
951
+ refactor(core): simplify preprompt handling for commit prompts
952
952
 
953
- - [refactor] Remove process_context function (cli/modes/gitcommsg.py:69-124)
954
- - [refactor] Update all functions to accept raw context string (gitcommsg.py:create_system_prompt())
955
- - [refactor] Replace context_data usages with context (gitcommsg.py)
953
+ - [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
954
+ - [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
955
+ - [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
956
956
  - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
957
957
  - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
958
958
 
@@ -1025,17 +1025,17 @@ def gitcommsg_mode(client, args, logger=None):
1025
1025
  if active_logger:
1026
1026
  active_logger.log_diff("DEBUG", diff_content)
1027
1027
 
1028
- # Process context if provided
1029
- context = None
1030
- if args.message_context:
1031
- context = args.message_context
1028
+ # Process preprompt if provided
1029
+ preprompt = None
1030
+ if args.preprompt:
1031
+ preprompt = args.preprompt
1032
1032
  if active_logger:
1033
- active_logger.debug(f"Using raw context: {context}")
1034
- active_logger.log_content("DEBUG", "CONTEXT", context)
1033
+ active_logger.debug(f"Using preprompt: {preprompt}")
1034
+ active_logger.log_content("DEBUG", "PREPROMPT", preprompt)
1035
1035
 
1036
1036
  # Create system prompts for different stages
1037
- technical_system_prompt = create_technical_analysis_system_prompt(context)
1038
- commit_system_prompt = create_system_prompt(context)
1037
+ technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
1038
+ commit_system_prompt = create_system_prompt(preprompt)
1039
1039
 
1040
1040
  # Log system prompts
1041
1041
  if active_logger:
@@ -1066,7 +1066,7 @@ def gitcommsg_mode(client, args, logger=None):
1066
1066
  if active_logger:
1067
1067
  active_logger.info(f"Analyses chunk size: {analyses_chunk_size}")
1068
1068
 
1069
- if args.recursive_chunk:
1069
+ if args.rec_chunk:
1070
1070
  # Use chunking with recursive processing
1071
1071
  if active_logger:
1072
1072
  active_logger.info(f"Using recursive chunking with max_recursion_depth: {max_recursion_depth}")
@@ -1074,7 +1074,7 @@ def gitcommsg_mode(client, args, logger=None):
1074
1074
  result = process_with_chunking(
1075
1075
  client,
1076
1076
  diff_content,
1077
- context,
1077
+ preprompt,
1078
1078
  chunk_size=args.chunk_size,
1079
1079
  recursive=True,
1080
1080
  logger=active_logger,
@@ -1087,6 +1087,7 @@ def gitcommsg_mode(client, args, logger=None):
1087
1087
  if active_logger:
1088
1088
  active_logger.info("Processing without chunking")
1089
1089
 
1090
+ # Pass preprompt to create_final_prompt
1090
1091
  prompt = create_final_prompt(diff_content)
1091
1092
 
1092
1093
  # Log final template
ngpt/utils/cli_config.py CHANGED
@@ -20,8 +20,7 @@ CLI_CONFIG_OPTIONS = {
20
20
  "config-index": {"type": "int", "default": 0, "context": ["all"], "exclusive": ["provider"]},
21
21
  "web-search": {"type": "bool", "default": False, "context": ["all"]},
22
22
  # GitCommit message options
23
- "message-context": {"type": "str", "default": None, "context": ["gitcommsg"]},
24
- "recursive-chunk": {"type": "bool", "default": False, "context": ["gitcommsg"]},
23
+ "rec-chunk": {"type": "bool", "default": False, "context": ["gitcommsg"]},
25
24
  "diff": {"type": "str", "default": None, "context": ["gitcommsg"]},
26
25
  "chunk-size": {"type": "int", "default": 200, "context": ["gitcommsg"]},
27
26
  "analyses-chunk-size": {"type": "int", "default": 200, "context": ["gitcommsg"]},
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ngpt
3
- Version: 2.16.7
4
- Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, and any OpenAI-compatible API.
3
+ Version: 3.0.1
4
+ Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, Gemini, and any OpenAI-compatible API.
5
5
  Project-URL: Homepage, https://github.com/nazdridoy/ngpt
6
6
  Project-URL: Repository, https://github.com/nazdridoy/ngpt
7
7
  Project-URL: Bug Tracker, https://github.com/nazdridoy/ngpt/issues
@@ -36,20 +36,52 @@ Description-Content-Type: text/markdown
36
36
 
37
37
  # nGPT
38
38
 
39
- 🤖 nGPT: A Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible library all in one package. Seamlessly work with OpenAI, Ollama, Groq, Claude, or any OpenAI-compatible API to generate code, craft git commits, rewrite text, and execute shell commands. Fast, lightweight, and designed for both casual users and developers.
39
+ <p align="center">
40
+ <a href="https://pypi.org/project/ngpt/"><img src="https://img.shields.io/pypi/v/ngpt.svg" alt="PyPI version"></a>
41
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
42
+ <a href="https://nazdridoy.github.io/ngpt/"><img src="https://img.shields.io/badge/docs-available-brightgreen.svg" alt="Documentation"></a>
43
+ </p>
44
+
45
+ <p align="center">
46
+ <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Linux-support-blue?logo=linux" alt="Linux"></a>
47
+ <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Windows-support-blue?logo=windows" alt="Windows"></a>
48
+ <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/macOS-support-blue?logo=apple" alt="macOS"></a>
49
+ <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Android-Termux-blue?logo=android" alt="Android"></a>
50
+ </p>
51
+
52
+ 🤖 nGPT: A Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible library all in one package. Seamlessly work with OpenAI, Ollama, Groq, Claude, Gemini, or any OpenAI-compatible API to generate code, craft git commits, rewrite text, and execute shell commands. Fast, lightweight, and designed for both casual users and developers.
40
53
 
41
- [![PyPI version](https://img.shields.io/pypi/v/ngpt.svg)](https://pypi.org/project/ngpt/)
42
- [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
43
- [![Documentation](https://img.shields.io/badge/docs-available-brightgreen.svg)](https://nazdridoy.github.io/ngpt/)
44
- [![Linux](https://img.shields.io/badge/Linux-support-blue?logo=linux)](https://nazdridoy.github.io/ngpt/installation.html)
45
- [![Windows](https://img.shields.io/badge/Windows-support-blue?logo=windows)](https://nazdridoy.github.io/ngpt/installation.html)
46
- [![macOS](https://img.shields.io/badge/macOS-support-blue?logo=apple)](https://nazdridoy.github.io/ngpt/installation.html)
47
- [![Android](https://img.shields.io/badge/Android-Termux-blue?logo=android)](https://nazdridoy.github.io/ngpt/installation.html)
48
- [![Python Versions](https://img.shields.io/pypi/pyversions/ngpt.svg)](https://pypi.org/project/ngpt/)
49
54
 
50
55
  ![2025-04-23_16-18-01](https://github.com/user-attachments/assets/b8e58926-5165-4352-b48b-9f4a982da86e)
51
56
 
52
57
 
58
+ ## Features
59
+
60
+ - ✅ **Versatile**: Use as a CLI tool, Python library, or CLI framework for building custom tools
61
+ - 🪶 **Lightweight**: Minimal dependencies with everything you need included
62
+ - 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, Gemini, and any compatible endpoint
63
+ - 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
64
+ - 📊 **Streaming Responses**: Real-time output for better user experience
65
+ - 🔍 **Web Search**: Integrated with compatible API endpoints
66
+ - 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
67
+ - 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
68
+ - ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
69
+ - ⚙️ **Multiple Configurations**: Cross-platform config system supporting different profiles
70
+ - 💻 **Shell Command Generation**: OS-aware command execution
71
+ - 🧠 **Text Rewriting**: Improve text quality while maintaining original tone and meaning
72
+ - 🧩 **Clean Code Generation**: Output code without markdown or explanations
73
+ - 📝 **Rich Multiline Editor**: Interactive multiline text input with syntax highlighting and intuitive controls
74
+ - 📑 **Git Commit Messages**: AI-powered generation of conventional, detailed commit messages from git diffs
75
+ - 🎭 **System Prompts**: Customize model behavior with custom system prompts
76
+ - 📃 **Conversation Logging**: Save your conversations to text files for later reference
77
+ - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
78
+ - 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
79
+ - 🔄 **Provider Switching**: Easily switch between different LLM providers with a single parameter
80
+ - 🚀 **Performance Optimized**: Fast response times and minimal resource usage
81
+
82
+ See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
83
+
84
+
53
85
  ## Table of Contents
54
86
  - [Quick Start](#quick-start)
55
87
  - [Features](#features)
@@ -109,34 +141,34 @@ ngpt --code --stream-prettify "function to calculate the Fibonacci sequence"
109
141
  ngpt --shell "list all files in the current directory"
110
142
 
111
143
  # Read from stdin and use the content in your prompt
112
- echo "What is this text about?" | ngpt --stdin "Analyze the following text: {}"
144
+ echo "What is this text about?" | ngpt -p "Analyze the following text: {}"
113
145
 
114
146
  # Rewrite text to improve quality while preserving tone and meaning
115
- echo "your text" | ngpt --rewrite
147
+ echo "your text" | ngpt -r
116
148
 
117
149
  # Rewrite text from a command-line argument
118
- ngpt --rewrite "your text to rewrite"
150
+ ngpt -r "your text to rewrite"
119
151
 
120
152
  # Rewrite text from a file
121
- cat file.txt | ngpt --rewrite
153
+ cat file.txt | ngpt -r
122
154
 
123
155
  # Generate AI-powered git commit messages for staged changes
124
- ngpt --gitcommsg
156
+ ngpt -g
125
157
 
126
- # Generate commit message with context
127
- ngpt --gitcommsg -m "type:feat"
158
+ # Generate commit message from staged changes with a context directive
159
+ ngpt -g --preprompt "type:feat"
128
160
 
129
161
  # Process large diffs in chunks with recursive analysis
130
- ngpt --gitcommsg -r
162
+ ngpt -g --rec-chunk
131
163
 
132
164
  # Process a diff file instead of staged changes
133
- ngpt --gitcommsg --diff /path/to/changes.diff
165
+ ngpt -g --diff /path/to/changes.diff
134
166
 
135
167
  # Generate a commit message with logging for debugging
136
- ngpt --gitcommsg --log commit_log.txt
168
+ ngpt -g --log commit_log.txt
137
169
 
138
170
  # Use interactive multiline editor to enter text to rewrite
139
- ngpt --rewrite
171
+ ngpt -r
140
172
 
141
173
  # Display markdown responses with beautiful formatting
142
174
  ngpt --prettify "Explain markdown syntax with examples"
@@ -160,7 +192,7 @@ ngpt --interactive --log conversation.log
160
192
  ngpt --log "Tell me about quantum computing"
161
193
 
162
194
  # Process text from stdin using the {} placeholder
163
- cat README.md | ngpt --stdin "Summarize this document: {}"
195
+ cat README.md | ngpt -p "Summarize this document: {}"
164
196
 
165
197
  # Use different model providers by specifying the provider name
166
198
  ngpt --provider Groq "Explain quantum computing"
@@ -172,32 +204,6 @@ ngpt --provider Ollama "Explain quantum physics" > ollama_response.txt
172
204
 
173
205
  For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
174
206
 
175
- ## Features
176
-
177
- - ✅ **Versatile**: Use as a CLI tool, Python library, or CLI framework for building custom tools
178
- - 🪶 **Lightweight**: Minimal dependencies with everything you need included
179
- - 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, and any compatible endpoint
180
- - 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
181
- - 📊 **Streaming Responses**: Real-time output for better user experience
182
- - 🔍 **Web Search**: Integrated with compatible API endpoints
183
- - 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
184
- - 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
185
- - ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
186
- - ⚙️ **Multiple Configurations**: Cross-platform config system supporting different profiles
187
- - 💻 **Shell Command Generation**: OS-aware command execution
188
- - 🧠 **Text Rewriting**: Improve text quality while maintaining original tone and meaning
189
- - 🧩 **Clean Code Generation**: Output code without markdown or explanations
190
- - 📝 **Rich Multiline Editor**: Interactive multiline text input with syntax highlighting and intuitive controls
191
- - 📑 **Git Commit Messages**: AI-powered generation of conventional, detailed commit messages from git diffs
192
- - 🎭 **System Prompts**: Customize model behavior with custom system prompts
193
- - 📃 **Conversation Logging**: Save your conversations to text files for later reference
194
- - 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
195
- - 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
196
- - 🔄 **Provider Switching**: Easily switch between different LLM providers with a single parameter
197
- - 🚀 **Performance Optimized**: Fast response times and minimal resource usage
198
-
199
- See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
200
-
201
207
  ## Documentation
202
208
 
203
209
  Comprehensive documentation, including API reference, usage guides, and examples, is available at:
@@ -432,120 +438,160 @@ For detailed information about building CLI tools with nGPT, see the [CLI Framew
432
438
 
433
439
  ### Command Line Options
434
440
 
435
- You can configure nGPT using the following options:
436
-
437
- #### Mode Options (Mutually Exclusive)
438
-
439
- | Option | Description |
440
- |--------|-------------|
441
- | `-i, --interactive` | Start an interactive chat session with conversation memory and special commands |
442
- | `-s, --shell` | Generate and execute shell commands appropriate for your operating system |
443
- | `-c, --code` | Generate clean code without markdown formatting or explanations |
444
- | `-t, --text` | Open interactive multiline editor for complex prompts with syntax highlighting |
445
- | `--stdin` | Read from stdin and use content with prompt. Use {} in prompt as placeholder for stdin content |
446
- | `--rewrite` | Rewrite text to improve quality while preserving original tone and meaning |
447
- | `--gitcommsg` | Generate AI-powered git commit messages from staged changes or diff files |
448
-
449
- #### Global Options
450
-
451
- | Option | Description |
452
- |--------|-------------|
453
- | `--api-key KEY` | API key for the service |
454
- | `--base-url URL` | Base URL for the API |
455
- | `--model MODEL` | Model to use |
456
- | `--web-search` | Enable web search capability (if your API endpoint supports it) |
457
- | `--temperature VALUE` | Set temperature (controls randomness, default: 0.7) |
458
- | `--top_p VALUE` | Set top_p (controls diversity, default: 1.0) |
459
- | `--max_tokens NUMBER` | Set maximum response length in tokens |
460
- | `--preprompt TEXT` | Set custom system prompt to control AI behavior |
461
- | `--language LANG` | Programming language to generate code in (for code mode, default: python) |
462
- | `--no-stream` | Return the whole response without streaming |
463
- | `--prettify` | Render markdown responses and code with syntax highlighting and formatting |
464
- | `--stream-prettify` | Enable streaming with markdown rendering (automatically uses Rich renderer) |
465
- | `--renderer {auto,rich,glow}` | Select which markdown renderer to use with --prettify (default: auto) |
466
- | `--log [FILE]` | Set filepath to log conversation to, or create a temporary log file if no path provided |
467
-
468
- #### Configuration Options
469
-
470
- | Option | Description |
471
- |--------|-------------|
472
- | `--config [PATH]` | Path to a custom config file or, if no value provided, enter interactive configuration mode |
473
- | `--config-index INDEX` | Index of the configuration to use or edit (default: 0) |
474
- | `--provider NAME` | Provider name to identify the configuration to use |
475
- | `--remove` | Remove the configuration at the specified index (requires --config and --config-index or --provider) |
476
- | `--show-config` | Show the current configuration(s) and exit |
477
- | `--all` | Show details for all configurations (requires --show-config) |
478
- | `--list-models` | List all available models for the current configuration and exit |
479
- | `--list-renderers` | Show available markdown renderers for use with --prettify |
480
- | `--cli-config [COMMAND]` | Manage CLI configuration (set, get, unset, list, help) |
481
- | `-v, --version` | Show version information and exit |
441
+ ```console
442
+ ❯ ngpt -h
443
+ usage: ngpt [-h] [-v] [--language LANGUAGE] [--config [CONFIG]] [--config-index CONFIG_INDEX] [--provider PROVIDER]
444
+ [--remove] [--show-config] [--all] [--list-models] [--list-renderers] [--cli-config [COMMAND ...]]
445
+ [--api-key API_KEY] [--base-url BASE_URL] [--model MODEL] [--web-search] [--temperature TEMPERATURE]
446
+ [--top_p TOP_P] [--max_tokens MAX_TOKENS] [--log [FILE]] [--preprompt PREPROMPT] [--no-stream] [--prettify]
447
+ [--stream-prettify] [--renderer {auto,rich,glow}] [--rec-chunk] [--diff [FILE]] [--chunk-size CHUNK_SIZE]
448
+ [--analyses-chunk-size ANALYSES_CHUNK_SIZE] [--max-msg-lines MAX_MSG_LINES]
449
+ [--max-recursion-depth MAX_RECURSION_DEPTH] [-i | -s | -c | -t | -p | -r | -g]
450
+ [prompt]
451
+
452
+ nGPT - Interact with AI language models via OpenAI-compatible APIs
453
+
454
+ positional arguments::
455
+
456
+ [PROMPT] The prompt to send
457
+
458
+ options::
459
+
460
+ -h, --help show this help message and exit
461
+ -v, --version Show version information and exit
462
+ --language LANGUAGE Programming language to generate code in (for code mode)
463
+
464
+ Configuration Options::
465
+
466
+ --config [CONFIG] Path to a custom config file or, if no value provided, enter interactive
467
+ configuration mode to create a new config
468
+ --config-index CONFIG_INDEX Index of the configuration to use or edit (default: 0)
469
+ --provider PROVIDER Provider name to identify the configuration to use
470
+ --remove Remove the configuration at the specified index (requires --config and
471
+ --config-index or --provider)
472
+ --show-config Show the current configuration(s) and exit
473
+ --all Show details for all configurations (requires --show-config)
474
+ --list-models List all available models for the current configuration and exit
475
+ --list-renderers Show available markdown renderers for use with --prettify
476
+ --cli-config [COMMAND ...] Manage CLI configuration (set, get, unset, list, help)
477
+
478
+ Global Options::
479
+
480
+ --api-key API_KEY API key for the service
481
+ --base-url BASE_URL Base URL for the API
482
+ --model MODEL Model to use
483
+ --web-search Enable web search capability (Note: Your API endpoint must support this
484
+ feature)
485
+ --temperature TEMPERATURE Set temperature (controls randomness, default: 0.7)
486
+ --top_p TOP_P Set top_p (controls diversity, default: 1.0)
487
+ --max_tokens MAX_TOKENS Set max response length in tokens
488
+ --log [FILE] Set filepath to log conversation to, or create a temporary log file if no path
489
+ provided
490
+ --preprompt PREPROMPT Set custom system prompt to control AI behavior
491
+ --no-stream Return the whole response without streaming
492
+ --prettify Render markdown responses and code with syntax highlighting and formatting
493
+ --stream-prettify Enable streaming with markdown rendering (automatically uses Rich renderer)
494
+ --renderer {auto,rich,glow} Select which markdown renderer to use with --prettify (auto, rich, or glow)
495
+
496
+ Git Commit Message Options::
497
+
498
+ --rec-chunk Process large diffs in chunks with recursive analysis if needed
499
+ --diff [FILE] Use diff from specified file instead of staged changes. If used without a path,
500
+ uses the path from CLI config.
501
+ --chunk-size CHUNK_SIZE Number of lines per chunk when chunking is enabled (default: 200)
502
+ --analyses-chunk-size ANALYSES_CHUNK_SIZE
503
+ Number of lines per chunk when recursively chunking analyses (default: 200)
504
+ --max-msg-lines MAX_MSG_LINES Maximum number of lines in commit message before condensing (default: 20)
505
+ --max-recursion-depth MAX_RECURSION_DEPTH
506
+ Maximum recursion depth for commit message condensing (default: 3)
507
+
508
+ Modes (mutually exclusive)::
509
+
510
+ -i, --interactive Start an interactive chat session
511
+ -s, --shell Generate and execute shell commands
512
+ -c, --code Generate code
513
+ -t, --text Enter multi-line text input (submit with Ctrl+D)
514
+ -p, --pipe Read from stdin and use content with prompt. Use {} in prompt as placeholder
515
+ for stdin content
516
+ -r, --rewrite Rewrite text from stdin to be more natural while preserving tone and meaning
517
+ -g, --gitcommsg Generate AI-powered git commit messages from staged changes or diff file
518
+ ```
519
+
520
+ > **Note**: For better visualization of conventional commit messages on GitHub, you can use the [GitHub Commit Labels](https://greasyfork.org/en/scripts/526153-github-commit-labels) userscript, which adds colorful labels to your commits.
482
521
 
483
522
  For a complete reference of all available options, see the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
484
523
 
485
524
  ### CLI Configuration
486
525
 
487
- NGPT offers a CLI configuration system that allows you to set default values for command-line options:
488
-
489
- ```bash
490
- # Set default options
491
- ngpt --cli-config set language typescript
492
- ngpt --cli-config set temperature 0.9
493
- ngpt --cli-config set prettify true
494
-
495
- # View current settings
496
- ngpt --cli-config get
497
-
498
- # Get a specific setting
499
- ngpt --cli-config get language
500
-
501
- # Remove a setting
502
- ngpt --cli-config unset prettify
526
+ NGPT offers a CLI configuration system that allows you to set default values for command-line options. This is especially useful when you:
527
+
528
+ - Repeatedly use the same provider or model
529
+ - Have preferred settings for specific tasks
530
+ - Want to create different workflows based on context
531
+
532
+ For example, setting your preferred language for code generation or temperature value means you won't have to specify these parameters each time:
533
+
534
+ ```console
535
+
536
+ ❯ ngpt --cli-config help
537
+
538
+ CLI Configuration Help:
539
+ Command syntax:
540
+ ngpt --cli-config help - Show this help message
541
+ ngpt --cli-config set OPTION VALUE - Set a default value for OPTION
542
+ ngpt --cli-config get OPTION - Get the current value of OPTION
543
+ ngpt --cli-config get - Show all CLI configuration settings
544
+ ngpt --cli-config unset OPTION - Remove OPTION from configuration
545
+ ngpt --cli-config list - List all available options
546
+
547
+ Available options:
548
+ General options (all modes):
549
+ config-index - int (default: 0) [exclusive with: provider]
550
+ log - str
551
+ max_tokens - int
552
+ no-stream - bool (default: False) [exclusive with: prettify, stream-prettify]
553
+ preprompt - str
554
+ prettify - bool (default: False) [exclusive with: no-stream, stream-prettify]
555
+ provider - str [exclusive with: config-index]
556
+ renderer - str (default: auto)
557
+ stream-prettify - bool (default: False) [exclusive with: no-stream, prettify]
558
+ temperature - float (default: 0.7)
559
+ top_p - float (default: 1.0)
560
+ web-search - bool (default: False)
561
+
562
+ Options for Code generation mode:
563
+ language - str (default: python)
564
+
565
+ Options for Git commit message mode:
566
+ analyses-chunk-size - int (default: 200)
567
+ chunk-size - int (default: 200)
568
+ diff - str
569
+ max-msg-lines - int (default: 20)
570
+ max-recursion-depth - int (default: 3)
571
+ rec-chunk - bool (default: False)
572
+
573
+ Example usage:
574
+ ngpt --cli-config set language java - Set default language to java for code generation
575
+ ngpt --cli-config set provider Gemini - Set Gemini as your default provider
576
+ ngpt --cli-config set temperature 0.9 - Set default temperature to 0.9
577
+ ngpt --cli-config set no-stream true - Disable streaming by default
578
+ ngpt --cli-config set rec-chunk true - Enable recursive chunking for git commit messages
579
+ ngpt --cli-config set diff /path/to/file.diff - Set default diff file for git commit messages
580
+ ngpt --cli-config get temperature - Check the current temperature setting
581
+ ngpt --cli-config get - Show all current CLI settings
582
+ ngpt --cli-config unset language - Remove language setting
583
+
584
+ Notes:
585
+ - CLI configuration is stored in:
586
+ • Linux: ~/.config/ngpt/ngpt-cli.conf
587
+ • macOS: ~/Library/Application Support/ngpt/ngpt-cli.conf
588
+ • Windows: %APPDATA%\ngpt\ngpt-cli.conf
589
+ - Settings are applied based on context (e.g., language only applies to code generation mode)
590
+ - Command-line arguments always override CLI configuration
591
+ - Some options are mutually exclusive and will not be applied together
503
592
 
504
- # List all available options
505
- ngpt --cli-config list
506
-
507
- # Show help information
508
- ngpt --cli-config help
509
- ```
510
-
511
- Key features of CLI configuration:
512
- - **Context-Aware**: Settings are applied based on the current command mode (e.g., `language` only applies in code generation mode `-c`).
513
- - **Priority**: When determining option values, NGPT uses the following priority order (highest to lowest):
514
- 1. Command-line arguments
515
- 2. Environment variables
516
- 3. CLI configuration (ngpt-cli.conf)
517
- 4. Main configuration file (ngpt.conf)
518
- 5. Default values
519
- - **Mutual Exclusivity**: For options like `no-stream`, `prettify`, and `stream-prettify`, setting one to `True` automatically sets the others to `False` in the configuration file, ensuring consistency.
520
- - **Smart Selection**: The `provider` setting is used to select which configuration profile to use, offering a persistent way to select your preferred API.
521
-
522
- Available options include:
523
- - General options (all modes): `provider`, `temperature`, `top_p`, `max_tokens`, `preprompt`, `renderer`, `config-index`, `web-search`
524
- - Mode-specific options: `language` (code mode only), `log` (interactive and text modes)
525
- - Mutually exclusive options: `no-stream`, `prettify`, `stream-prettify`
526
-
527
- #### Practical Examples
528
-
529
- ```bash
530
- # Set Gemini as your default provider
531
- ngpt --cli-config set provider Gemini
532
- # Now you can run commands without specifying --provider
533
- ngpt "Explain quantum computing"
534
-
535
- # Configure code generation for TypeScript
536
- ngpt --cli-config set language typescript
537
- # Now in code mode, TypeScript will be used by default
538
- ngpt -c "Write a function to sort an array"
539
-
540
- # Set a higher temperature for more creative responses
541
- ngpt --cli-config set temperature 0.9
542
593
  ```
543
594
 
544
- The CLI configuration is stored in:
545
- - Linux: `~/.config/ngpt/ngpt-cli.conf`
546
- - macOS: `~/Library/Application Support/ngpt/ngpt-cli.conf`
547
- - Windows: `%APPDATA%\ngpt\ngpt-cli.conf`
548
-
549
595
  For more details, see the [CLI Configuration Guide](https://nazdridoy.github.io/ngpt/usage/cli_config.html).
550
596
 
551
597
  ### Interactive Configuration
@@ -642,4 +688,4 @@ Please check the [CONTRIBUTING.md](CONTRIBUTING.md) file for detailed guidelines
642
688
 
643
689
  ## License
644
690
 
645
- This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
691
+ This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
@@ -2,26 +2,26 @@ ngpt/__init__.py,sha256=kpKhViLakwMdHZkuLht2vWcjt0uD_5gR33gvMhfXr6w,664
2
2
  ngpt/__main__.py,sha256=j3eFYPOtCCFBOGh7NK5IWEnADnTMMSEB9GLyIDoW724,66
3
3
  ngpt/client.py,sha256=rLgDPmJe8_yi13-XUiHJ45z54rJVrupxWmeb-fQZGF4,15129
4
4
  ngpt/cli/__init__.py,sha256=hebbDSMGiOd43YNnQP67uzr67Ue6rZPwm2czynr5iZY,43
5
- ngpt/cli/args.py,sha256=VJM6ySMnVrXgKaGb7Qb3AQPYxcQCv3FfCI4x8YkAsLQ,11534
5
+ ngpt/cli/args.py,sha256=XQvofZs_WkbgUto3Dbx7Yw-AmPAQHh8kdHUe3uWy8w4,11382
6
6
  ngpt/cli/config_manager.py,sha256=NQQcWnjUppAAd0s0p9YAf8EyKS1ex5-0EB4DvKdB4dk,3662
7
7
  ngpt/cli/formatters.py,sha256=HBYGlx_7eoAKyzfy0Vq5L0yn8yVKjngqYBukMmXCcz0,9401
8
8
  ngpt/cli/interactive.py,sha256=oLflfYVSX_NdYb5D8BsEeYr2m_6yWw06Rw-BS5tLqkI,12923
9
- ngpt/cli/main.py,sha256=6GO4r9e9su7FFukj9JeVmJt1bJsqPOJBj6xo3iyMZXU,28911
9
+ ngpt/cli/main.py,sha256=rIGbsr1wll4fJhAmPx8C9fCJYlhKijdHqal9e3NC2I8,28908
10
10
  ngpt/cli/renderers.py,sha256=yYt3b_cWUYckfgLQ1wkEiQCnq5v5m7hfn5PWxHJzR9Y,12422
11
11
  ngpt/cli/ui.py,sha256=m8qtd4cCSHBGHPUlHVdBEfun1G1Se4vLKTSgnS7QOKE,6775
12
12
  ngpt/cli/modes/__init__.py,sha256=R3aO662RIzWEOvr3moTrEI8Tpg0zDDyMGGh1-OxiRgM,285
13
- ngpt/cli/modes/chat.py,sha256=-g8hMHkn9Dv3iGFZvbXXFQwG8KsXEgngr6Si9AUdQXE,5701
13
+ ngpt/cli/modes/chat.py,sha256=ULSIFach3Namb3FQBhgACxhHXME-fX7bLBDum9JQfks,5699
14
14
  ngpt/cli/modes/code.py,sha256=1EeiooyNe1jvTyNu6r0J9xY34QXb2uBQKPeNca-DrRI,5734
15
- ngpt/cli/modes/gitcommsg.py,sha256=Bhgg9UArrfRUwosgVlNLUB7i1B8j-1ngpkmCm5iZBkM,46786
15
+ ngpt/cli/modes/gitcommsg.py,sha256=rsfMoeOupmNp-5p5fsMSPAf18BbzXWq-4PF2HjEz6SY,46991
16
16
  ngpt/cli/modes/rewrite.py,sha256=iEMn6i6cuaxOiKq886Lsh961riCmPj5K1mXVQeT2Z7k,10112
17
17
  ngpt/cli/modes/shell.py,sha256=QkprnOxMMTg2v5DIwcofDnnr3JPNfuk-YgSQaae5Xps,3311
18
18
  ngpt/cli/modes/text.py,sha256=z08xaW2r0XzyunUzBhwZKqnwPjyRol_CSazjLygmLRs,4514
19
19
  ngpt/utils/__init__.py,sha256=E46suk2-QgYBI0Qrs6WXOajOUOebF3ETAFY7ah8DTWs,942
20
- ngpt/utils/cli_config.py,sha256=tQxR3a2iXyc5TfRBPQHSUXPInO2dv_zTPGn04eWfmoo,11285
20
+ ngpt/utils/cli_config.py,sha256=IlHnOEEGpLoGZInynM778wgpxLVcJ_STKWxg2Ypvir4,11196
21
21
  ngpt/utils/config.py,sha256=WYOk_b1eiYjo6hpV3pfXr2RjqhOnmKqwZwKid1T41I4,10363
22
22
  ngpt/utils/log.py,sha256=f1jg2iFo35PAmsarH8FVL_62plq4VXH0Mu2QiP6RJGw,15934
23
- ngpt-2.16.7.dist-info/METADATA,sha256=VWaC8qsuVQS5Qb4ZwDB5lo-7ToRmCBOCx_XYLMEdMrE,25460
24
- ngpt-2.16.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
25
- ngpt-2.16.7.dist-info/entry_points.txt,sha256=SqAAvLhMrsEpkIr4YFRdUeyuXQ9o0IBCeYgE6AVojoI,44
26
- ngpt-2.16.7.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
27
- ngpt-2.16.7.dist-info/RECORD,,
23
+ ngpt-3.0.1.dist-info/METADATA,sha256=6YOossV8zjUDK_cEn0UEE05frHrzsz4F46-tJ_lGCWk,28821
24
+ ngpt-3.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
25
+ ngpt-3.0.1.dist-info/entry_points.txt,sha256=SqAAvLhMrsEpkIr4YFRdUeyuXQ9o0IBCeYgE6AVojoI,44
26
+ ngpt-3.0.1.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
27
+ ngpt-3.0.1.dist-info/RECORD,,
File without changes