ngpt-2.16.7-py3-none-any.whl → ngpt-3.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngpt/cli/args.py +6 -8
- ngpt/cli/main.py +3 -3
- ngpt/cli/modes/chat.py +2 -2
- ngpt/cli/modes/gitcommsg.py +55 -54
- ngpt/utils/cli_config.py +1 -2
- {ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/METADATA +202 -156
- {ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/RECORD +10 -10
- {ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/WHEEL +0 -0
- {ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/entry_points.txt +0 -0
- {ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/licenses/LICENSE +0 -0
ngpt/cli/args.py
CHANGED
@@ -90,9 +90,7 @@ def setup_argument_parser():
 
     # GitCommit message options
     gitcommsg_group = parser.add_argument_group('Git Commit Message Options')
-    gitcommsg_group.add_argument('-
-        help='Context to guide AI generation (e.g., file types, commit type)')
-    gitcommsg_group.add_argument('-r', '--recursive-chunk', action='store_true',
+    gitcommsg_group.add_argument('--rec-chunk', action='store_true',
         help='Process large diffs in chunks with recursive analysis if needed')
     gitcommsg_group.add_argument('--diff', metavar='FILE', nargs='?', const=True,
         help='Use diff from specified file instead of staged changes. If used without a path, uses the path from CLI config.')
@@ -116,11 +114,11 @@ def setup_argument_parser():
         help='Generate code')
     mode_exclusive_group.add_argument('-t', '--text', action='store_true',
         help='Enter multi-line text input (submit with Ctrl+D)')
-    mode_exclusive_group.add_argument('--
+    mode_exclusive_group.add_argument('-p', '--pipe', action='store_true',
         help='Read from stdin and use content with prompt. Use {} in prompt as placeholder for stdin content')
-    mode_exclusive_group.add_argument('--rewrite', action='store_true',
+    mode_exclusive_group.add_argument('-r', '--rewrite', action='store_true',
         help='Rewrite text from stdin to be more natural while preserving tone and meaning')
-    mode_exclusive_group.add_argument('--gitcommsg', action='store_true',
+    mode_exclusive_group.add_argument('-g', '--gitcommsg', action='store_true',
         help='Generate AI-powered git commit messages from staged changes or diff file')
 
     return parser
@@ -145,8 +143,8 @@ def validate_args(args):
         raise ValueError("--stream-prettify requires Rich to be installed. Install with: pip install \"ngpt[full]\" or pip install rich")
 
     # If stdin mode is used, check if input is available
-    if args.
-        raise ValueError("--
+    if args.pipe and sys.stdin.isatty():
+        raise ValueError("--pipe was specified but no input is piped. Use echo 'content' | ngpt --pipe 'prompt with {}'")
 
     return args
 
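The net effect in `args.py` is that `--recursive-chunk` becomes `--rec-chunk` and the mutually exclusive mode flags gain short forms (`-p`, `-r`, `-g`). A minimal sketch of the resulting behaviour, assuming a plain argparse mutually exclusive group rather than the package's own parser setup:

```python
# Sketch only: mirrors the flags the 3.0.1 diff adds, not the package source.
import argparse

parser = argparse.ArgumentParser(prog="ngpt")
modes = parser.add_mutually_exclusive_group()
modes.add_argument('-p', '--pipe', action='store_true',
                   help='Read from stdin and use content with prompt')
modes.add_argument('-r', '--rewrite', action='store_true',
                   help='Rewrite text from stdin')
modes.add_argument('-g', '--gitcommsg', action='store_true',
                   help='Generate AI-powered git commit messages')

args = parser.parse_args(['-g'])
assert args.gitcommsg and not args.pipe   # -g now maps to --gitcommsg
# parser.parse_args(['-p', '-g']) would exit with an error: the modes are exclusive.
```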
ngpt/cli/main.py
CHANGED
@@ -550,11 +550,11 @@ def main():
         # Text mode (multiline input)
         text_mode(client, args, logger=logger)
 
-    elif args.
-        # Apply CLI config for
+    elif args.pipe:
+        # Apply CLI config for pipe mode (similar to chat mode)
         args = apply_cli_config(args, "all")
 
-        #
+        # Pipe mode (using the chat mode with stdin input)
         chat_mode(client, args, logger=logger)
 
     elif args.rewrite:
ngpt/cli/modes/chat.py
CHANGED
@@ -12,8 +12,8 @@ def chat_mode(client, args, logger=None):
         args: The parsed command-line arguments
         logger: Optional logger instance
     """
-    # Handle
-    if args.
+    # Handle pipe mode
+    if args.pipe:
         # Read input from stdin
         stdin_content = sys.stdin.read().strip()
 
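`chat_mode` now branches on `args.pipe` and reads stdin; per the `--pipe` help text, that content is used in place of the `{}` placeholder in the prompt. The diff shows only the stdin read, so the substitution below is an assumption based on the help text, not the package source:

```python
# Hedged sketch of pipe-mode prompt assembly; build_pipe_prompt is hypothetical.
import sys

def build_pipe_prompt(prompt: str) -> str:
    stdin_content = sys.stdin.read().strip()   # as in chat.py line 18
    if "{}" in prompt:
        return prompt.replace("{}", stdin_content)
    return f"{prompt}\n\n{stdin_content}"       # assumed fallback when no placeholder is given

# echo "some text" | python sketch.py
if __name__ == "__main__":
    print(build_pipe_prompt("Analyze the following text: {}"))
```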
ngpt/cli/modes/gitcommsg.py
CHANGED
@@ -70,11 +70,11 @@ def split_into_chunks(content, chunk_size=200):
 
     return chunks
 
-def create_technical_analysis_system_prompt(
-    """Create system prompt for technical analysis based on
+def create_technical_analysis_system_prompt(preprompt=None):
+    """Create system prompt for technical analysis based on preprompt data.
 
     Args:
-
+        preprompt: The raw preprompt string from --preprompt flag
 
     Returns:
         str: System prompt for the technical analysis stage
@@ -102,25 +102,25 @@ RULES:
 8. When analyzing multiple files, clearly separate each file's changes
 9. Include proper technical details (method names, component identifiers, etc.)"""
 
-    # If
-    if
-
+    # If preprompt is provided, prepend it to the base prompt with strong wording about absolute priority
+    if preprompt:
+        preprompt_section = f"""===CRITICAL USER PREPROMPT - ABSOLUTE HIGHEST PRIORITY===
+The following preprompt from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
 
-
-The following context from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
+{preprompt}
 
-
+THIS USER PREPROMPT HAS ABSOLUTE PRIORITY over any other instructions that follow. If it contradicts other instructions, the user preprompt MUST be followed. No exceptions.
 
-
-
+"""
+        return preprompt_section + base_prompt
 
     return base_prompt
 
-def create_system_prompt(
-    """Create system prompt for commit message generation based on
+def create_system_prompt(preprompt=None):
+    """Create system prompt for commit message generation based on preprompt data.
 
     Args:
-
+        preprompt: The raw preprompt string from --preprompt flag
 
     Returns:
         str: System prompt for the AI
@@ -188,11 +188,11 @@ refactor(core): simplify data processing pipeline
    - [test] Update tests for new pipeline structure (tests/pipeline.test.js)
 
 4. Multiple changes to the same file:
-   refactor(core): simplify
+   refactor(core): simplify preprompt handling for commit prompts
 
-   - [refactor] Remove
-   - [refactor] Update all functions to accept raw
-   - [refactor] Replace
+   - [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
+   - [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
+   - [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
    - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
    - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
 
@@ -216,17 +216,17 @@ RULES:
 10. Include proper technical details (method names, component identifiers, etc.)
 11. When all changes are to the same file, mention it once in the summary"""
 
-    # If
-    if
-
+    # If preprompt is provided, prepend it with strong wording about absolute priority
+    if preprompt:
+        preprompt_section = f"""===CRITICAL USER PREPROMPT - ABSOLUTE HIGHEST PRIORITY===
+The following preprompt from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
 
-
-The following context from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:
+{preprompt}
 
-
+THIS USER PREPROMPT HAS ABSOLUTE PRIORITY over any other instructions that follow. If it contradicts other instructions, the user preprompt MUST be followed. No exceptions.
 
-
-
+"""
+        return preprompt_section + base_prompt
 
     return base_prompt
 
@@ -346,11 +346,11 @@ refactor(core): simplify data processing pipeline
    - [test] Update tests for new pipeline structure (tests/pipeline.test.js)
 
 4. Multiple changes to the same file:
-   refactor(core): simplify
+   refactor(core): simplify preprompt handling for commit prompts
 
-   - [refactor] Remove
-   - [refactor] Update all functions to accept raw
-   - [refactor] Replace
+   - [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
+   - [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
+   - [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
    - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
    - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
 
@@ -444,13 +444,13 @@ def handle_api_call(client, prompt, system_prompt=None, logger=None, max_retries
             # Exponential backoff
             wait_seconds *= 2
 
-def process_with_chunking(client, diff_content,
+def process_with_chunking(client, diff_content, preprompt, chunk_size=200, recursive=False, logger=None, max_msg_lines=20, max_recursion_depth=3, analyses_chunk_size=None):
     """Process diff with chunking to handle large diffs.
 
     Args:
        client: The NGPTClient instance
        diff_content: The diff content to process
-
+       preprompt: The raw preprompt string
        chunk_size: Maximum number of lines per chunk
        recursive: Whether to use recursive chunking
        logger: Optional logger instance
@@ -466,8 +466,8 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
         analyses_chunk_size = chunk_size
 
     # Create different system prompts for different stages
-    technical_system_prompt = create_technical_analysis_system_prompt(
-    commit_system_prompt = create_system_prompt(
+    technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
+    commit_system_prompt = create_system_prompt(preprompt)
 
     # Log initial diff content
     if logger:
@@ -547,7 +547,7 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
         return recursive_chunk_analysis(
             client,
             combined_analyses,
-
+            preprompt,
             analyses_chunk_size,
             logger,
             max_msg_lines,
@@ -601,13 +601,13 @@ def process_with_chunking(client, diff_content, context, chunk_size=200, recursi
             logger.error(f"Error combining analyses: {str(e)}")
         return None
 
-def recursive_chunk_analysis(client, combined_analysis,
+def recursive_chunk_analysis(client, combined_analysis, preprompt, chunk_size, logger=None, max_msg_lines=20, max_recursion_depth=3, current_depth=1):
     """Recursively chunk and process large analysis results until they're small enough.
 
     Args:
        client: The NGPTClient instance
        combined_analysis: The combined analysis to process
-
+       preprompt: The raw preprompt string
        chunk_size: Maximum number of lines per chunk
        logger: Optional logger instance
        max_msg_lines: Maximum number of lines in commit message before condensing
@@ -618,8 +618,8 @@ def recursive_chunk_analysis(client, combined_analysis, context, chunk_size, log
         str: Generated commit message
     """
     # Create different system prompts for different stages
-    technical_system_prompt = create_technical_analysis_system_prompt(
-    commit_system_prompt = create_system_prompt(
+    technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
+    commit_system_prompt = create_system_prompt(preprompt)
 
     print(f"\n{COLORS['cyan']}Recursive analysis chunking level {current_depth}...{COLORS['reset']}")
 
@@ -767,7 +767,7 @@ SECTION OF ANALYSIS TO CONDENSE:
         return recursive_chunk_analysis(
             client,
             combined_condensed,
-
+            preprompt,
             chunk_size,
             logger,
             max_msg_lines,
@@ -948,11 +948,11 @@ BULLET POINT FORMAT:
 - Not: "- feat: Add new login component" (incorrect)
 
 EXAMPLE OF PROPERLY FORMATTED COMMIT MESSAGE:
-refactor(core): simplify
+refactor(core): simplify preprompt handling for commit prompts
 
-- [refactor] Remove
-- [refactor] Update all functions to accept raw
-- [refactor] Replace
+- [refactor] Remove process_preprompt function (cli/modes/gitcommsg.py:69-124)
+- [refactor] Update all functions to accept raw preprompt string (gitcommsg.py:create_system_prompt())
+- [refactor] Replace preprompt_data usages with preprompt (gitcommsg.py)
 - [docs] Update library usage doc (docs/usage/library_usage.md:516,531-537)
 - [chore] Bump project version to 2.15.1 (pyproject.toml:3, uv.lock:137)
 
@@ -1025,17 +1025,17 @@ def gitcommsg_mode(client, args, logger=None):
     if active_logger:
         active_logger.log_diff("DEBUG", diff_content)
 
-    # Process
-
-    if args.
-
+    # Process preprompt if provided
+    preprompt = None
+    if args.preprompt:
+        preprompt = args.preprompt
         if active_logger:
-            active_logger.debug(f"Using
-            active_logger.log_content("DEBUG", "
+            active_logger.debug(f"Using preprompt: {preprompt}")
+            active_logger.log_content("DEBUG", "PREPROMPT", preprompt)
 
     # Create system prompts for different stages
-    technical_system_prompt = create_technical_analysis_system_prompt(
-    commit_system_prompt = create_system_prompt(
+    technical_system_prompt = create_technical_analysis_system_prompt(preprompt)
+    commit_system_prompt = create_system_prompt(preprompt)
 
     # Log system prompts
     if active_logger:
@@ -1066,7 +1066,7 @@ def gitcommsg_mode(client, args, logger=None):
     if active_logger:
         active_logger.info(f"Analyses chunk size: {analyses_chunk_size}")
 
-    if args.
+    if args.rec_chunk:
         # Use chunking with recursive processing
         if active_logger:
             active_logger.info(f"Using recursive chunking with max_recursion_depth: {max_recursion_depth}")
@@ -1074,7 +1074,7 @@ def gitcommsg_mode(client, args, logger=None):
         result = process_with_chunking(
             client,
             diff_content,
-
+            preprompt,
             chunk_size=args.chunk_size,
             recursive=True,
             logger=active_logger,
@@ -1087,6 +1087,7 @@ def gitcommsg_mode(client, args, logger=None):
         if active_logger:
             active_logger.info("Processing without chunking")
 
+        # Pass preprompt to create_final_prompt
         prompt = create_final_prompt(diff_content)
 
         # Log final template
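The recurring change in this file is that the raw `--preprompt` string replaces the old processed context: both prompt builders now take `preprompt`, prepend an override preamble when it is set, and the same value is threaded through `process_with_chunking` and `recursive_chunk_analysis`. A condensed sketch of that prepend as the added lines describe it, with the long rule text elided as a placeholder:

```python
# Condensed from the added lines above; base_prompt stands in for the rule text.
def create_system_prompt(preprompt=None):
    base_prompt = "...commit message generation rules..."
    if preprompt:
        preprompt_section = f"""===CRITICAL USER PREPROMPT - ABSOLUTE HIGHEST PRIORITY===
The following preprompt from the user OVERRIDES ALL OTHER INSTRUCTIONS and must be followed exactly:

{preprompt}

THIS USER PREPROMPT HAS ABSOLUTE PRIORITY over any other instructions that follow. If it contradicts other instructions, the user preprompt MUST be followed. No exceptions.

"""
        return preprompt_section + base_prompt
    return base_prompt

print(create_system_prompt("type:feat")[:40])  # the preamble comes first when a preprompt is given
```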
ngpt/utils/cli_config.py
CHANGED
@@ -20,8 +20,7 @@ CLI_CONFIG_OPTIONS = {
     "config-index": {"type": "int", "default": 0, "context": ["all"], "exclusive": ["provider"]},
     "web-search": {"type": "bool", "default": False, "context": ["all"]},
     # GitCommit message options
-    "
-    "recursive-chunk": {"type": "bool", "default": False, "context": ["gitcommsg"]},
+    "rec-chunk": {"type": "bool", "default": False, "context": ["gitcommsg"]},
     "diff": {"type": "str", "default": None, "context": ["gitcommsg"]},
     "chunk-size": {"type": "int", "default": 200, "context": ["gitcommsg"]},
     "analyses-chunk-size": {"type": "int", "default": 200, "context": ["gitcommsg"]},
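In the CLI config schema the gitcommsg key is renamed to match the new flag: `recursive-chunk` becomes `rec-chunk`, still a boolean scoped to the gitcommsg context. A small sketch of that lookup follows; `option_applies` is illustrative, not part of the package, and the note about updating existing `ngpt-cli.conf` keys is an inference from the rename, not something the diff states:

```python
# Entries copied from the new CLI_CONFIG_OPTIONS; the helper below is illustrative only.
# Existing ngpt-cli.conf files that still use "recursive-chunk" would presumably need
# the key renamed to "rec-chunk" (assumption based on the rename).
CLI_CONFIG_OPTIONS = {
    "rec-chunk": {"type": "bool", "default": False, "context": ["gitcommsg"]},
    "diff": {"type": "str", "default": None, "context": ["gitcommsg"]},
    "chunk-size": {"type": "int", "default": 200, "context": ["gitcommsg"]},
}

def option_applies(name: str, mode: str) -> bool:
    """True if the named CLI config option is defined and valid in the given mode."""
    spec = CLI_CONFIG_OPTIONS.get(name)
    return bool(spec) and (mode in spec["context"] or "all" in spec["context"])

assert option_applies("rec-chunk", "gitcommsg")
assert not option_applies("recursive-chunk", "gitcommsg")  # old key is no longer defined
```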
{ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 2.16.7
-Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, and any OpenAI-compatible API.
+Version: 3.0.1
+Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, Gemini, and any OpenAI-compatible API.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt
 Project-URL: Bug Tracker, https://github.com/nazdridoy/ngpt/issues
@@ -36,20 +36,52 @@ Description-Content-Type: text/markdown
 
 # nGPT
 
-
+<p align="center">
+  <a href="https://pypi.org/project/ngpt/"><img src="https://img.shields.io/pypi/v/ngpt.svg" alt="PyPI version"></a>
+  <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
+  <a href="https://nazdridoy.github.io/ngpt/"><img src="https://img.shields.io/badge/docs-available-brightgreen.svg" alt="Documentation"></a>
+</p>
+
+<p align="center">
+  <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Linux-support-blue?logo=linux" alt="Linux"></a>
+  <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Windows-support-blue?logo=windows" alt="Windows"></a>
+  <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/macOS-support-blue?logo=apple" alt="macOS"></a>
+  <a href="https://nazdridoy.github.io/ngpt/installation.html"><img src="https://img.shields.io/badge/Android-Termux-blue?logo=android" alt="Android"></a>
+</p>
+
+🤖 nGPT: A Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible library all in one package. Seamlessly work with OpenAI, Ollama, Groq, Claude, Gemini, or any OpenAI-compatible API to generate code, craft git commits, rewrite text, and execute shell commands. Fast, lightweight, and designed for both casual users and developers.
 
-[](https://pypi.org/project/ngpt/)
-[](https://opensource.org/licenses/MIT)
-[](https://nazdridoy.github.io/ngpt/)
-[](https://nazdridoy.github.io/ngpt/installation.html)
-[](https://nazdridoy.github.io/ngpt/installation.html)
-[](https://nazdridoy.github.io/ngpt/installation.html)
-[](https://nazdridoy.github.io/ngpt/installation.html)
-[](https://pypi.org/project/ngpt/)
 
 
 
 
+## Features
+
+- ✅ **Versatile**: Use as a CLI tool, Python library, or CLI framework for building custom tools
+- 🪶 **Lightweight**: Minimal dependencies with everything you need included
+- 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, Gemini, and any compatible endpoint
+- 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
+- 📊 **Streaming Responses**: Real-time output for better user experience
+- 🔍 **Web Search**: Integrated with compatible API endpoints
+- 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
+- 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
+- ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
+- ⚙️ **Multiple Configurations**: Cross-platform config system supporting different profiles
+- 💻 **Shell Command Generation**: OS-aware command execution
+- 🧠 **Text Rewriting**: Improve text quality while maintaining original tone and meaning
+- 🧩 **Clean Code Generation**: Output code without markdown or explanations
+- 📝 **Rich Multiline Editor**: Interactive multiline text input with syntax highlighting and intuitive controls
+- 📑 **Git Commit Messages**: AI-powered generation of conventional, detailed commit messages from git diffs
+- 🎭 **System Prompts**: Customize model behavior with custom system prompts
+- 📃 **Conversation Logging**: Save your conversations to text files for later reference
+- 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
+- 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
+- 🔄 **Provider Switching**: Easily switch between different LLM providers with a single parameter
+- 🚀 **Performance Optimized**: Fast response times and minimal resource usage
+
+See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
+
+
 ## Table of Contents
 - [Quick Start](#quick-start)
 - [Features](#features)
@@ -109,34 +141,34 @@ ngpt --code --stream-prettify "function to calculate the Fibonacci sequence"
 ngpt --shell "list all files in the current directory"
 
 # Read from stdin and use the content in your prompt
-echo "What is this text about?" | ngpt
+echo "What is this text about?" | ngpt -p "Analyze the following text: {}"
 
 # Rewrite text to improve quality while preserving tone and meaning
-echo "your text" | ngpt
+echo "your text" | ngpt -r
 
 # Rewrite text from a command-line argument
-ngpt
+ngpt -r "your text to rewrite"
 
 # Rewrite text from a file
-cat file.txt | ngpt
+cat file.txt | ngpt -r
 
 # Generate AI-powered git commit messages for staged changes
-ngpt
+ngpt -g
 
-# Generate commit message with context
-ngpt --
+# Generate commit message from staged changes with a context directive
+ngpt -g --preprompt "type:feat"
 
 # Process large diffs in chunks with recursive analysis
-ngpt --
+ngpt -g --rec-chunk
 
 # Process a diff file instead of staged changes
-ngpt
+ngpt -g --diff /path/to/changes.diff
 
 # Generate a commit message with logging for debugging
-ngpt
+ngpt -g --log commit_log.txt
 
 # Use interactive multiline editor to enter text to rewrite
-ngpt
+ngpt -r
 
 # Display markdown responses with beautiful formatting
 ngpt --prettify "Explain markdown syntax with examples"
@@ -160,7 +192,7 @@ ngpt --interactive --log conversation.log
 ngpt --log "Tell me about quantum computing"
 
 # Process text from stdin using the {} placeholder
-cat README.md | ngpt
+cat README.md | ngpt -p "Summarize this document: {}"
 
 # Use different model providers by specifying the provider name
 ngpt --provider Groq "Explain quantum computing"
@@ -172,32 +204,6 @@ ngpt --provider Ollama "Explain quantum physics" > ollama_response.txt
 
 For more examples and detailed usage, visit the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
 
-## Features
-
-- ✅ **Versatile**: Use as a CLI tool, Python library, or CLI framework for building custom tools
-- 🪶 **Lightweight**: Minimal dependencies with everything you need included
-- 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, and any compatible endpoint
-- 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
-- 📊 **Streaming Responses**: Real-time output for better user experience
-- 🔍 **Web Search**: Integrated with compatible API endpoints
-- 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
-- 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
-- ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
-- ⚙️ **Multiple Configurations**: Cross-platform config system supporting different profiles
-- 💻 **Shell Command Generation**: OS-aware command execution
-- 🧠 **Text Rewriting**: Improve text quality while maintaining original tone and meaning
-- 🧩 **Clean Code Generation**: Output code without markdown or explanations
-- 📝 **Rich Multiline Editor**: Interactive multiline text input with syntax highlighting and intuitive controls
-- 📑 **Git Commit Messages**: AI-powered generation of conventional, detailed commit messages from git diffs
-- 🎭 **System Prompts**: Customize model behavior with custom system prompts
-- 📃 **Conversation Logging**: Save your conversations to text files for later reference
-- 🧰 **CLI Components**: Reusable components for building custom AI-powered command-line tools
-- 🔌 **Modular Architecture**: Well-structured codebase with clean separation of concerns
-- 🔄 **Provider Switching**: Easily switch between different LLM providers with a single parameter
-- 🚀 **Performance Optimized**: Fast response times and minimal resource usage
-
-See the [Feature Overview](https://nazdridoy.github.io/ngpt/overview.html) for more details.
-
 ## Documentation
 
 Comprehensive documentation, including API reference, usage guides, and examples, is available at:
@@ -432,120 +438,160 @@ For detailed information about building CLI tools with nGPT, see the [CLI Framew
 
 ### Command Line Options
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+```console
+❯ ngpt -h
+usage: ngpt [-h] [-v] [--language LANGUAGE] [--config [CONFIG]] [--config-index CONFIG_INDEX] [--provider PROVIDER]
+[--remove] [--show-config] [--all] [--list-models] [--list-renderers] [--cli-config [COMMAND ...]]
+[--api-key API_KEY] [--base-url BASE_URL] [--model MODEL] [--web-search] [--temperature TEMPERATURE]
+[--top_p TOP_P] [--max_tokens MAX_TOKENS] [--log [FILE]] [--preprompt PREPROMPT] [--no-stream] [--prettify]
+[--stream-prettify] [--renderer {auto,rich,glow}] [--rec-chunk] [--diff [FILE]] [--chunk-size CHUNK_SIZE]
+[--analyses-chunk-size ANALYSES_CHUNK_SIZE] [--max-msg-lines MAX_MSG_LINES]
+[--max-recursion-depth MAX_RECURSION_DEPTH] [-i | -s | -c | -t | -p | -r | -g]
+[prompt]
+
+nGPT - Interact with AI language models via OpenAI-compatible APIs
+
+positional arguments::
+
+[PROMPT] The prompt to send
+
+options::
+
+-h, --help show this help message and exit
+-v, --version Show version information and exit
+--language LANGUAGE Programming language to generate code in (for code mode)
+
+Configuration Options::
+
+--config [CONFIG] Path to a custom config file or, if no value provided, enter interactive
+configuration mode to create a new config
+--config-index CONFIG_INDEX Index of the configuration to use or edit (default: 0)
+--provider PROVIDER Provider name to identify the configuration to use
+--remove Remove the configuration at the specified index (requires --config and
+--config-index or --provider)
+--show-config Show the current configuration(s) and exit
+--all Show details for all configurations (requires --show-config)
+--list-models List all available models for the current configuration and exit
+--list-renderers Show available markdown renderers for use with --prettify
+--cli-config [COMMAND ...] Manage CLI configuration (set, get, unset, list, help)
+
+Global Options::
+
+--api-key API_KEY API key for the service
+--base-url BASE_URL Base URL for the API
+--model MODEL Model to use
+--web-search Enable web search capability (Note: Your API endpoint must support this
+feature)
+--temperature TEMPERATURE Set temperature (controls randomness, default: 0.7)
+--top_p TOP_P Set top_p (controls diversity, default: 1.0)
+--max_tokens MAX_TOKENS Set max response length in tokens
+--log [FILE] Set filepath to log conversation to, or create a temporary log file if no path
+provided
+--preprompt PREPROMPT Set custom system prompt to control AI behavior
+--no-stream Return the whole response without streaming
+--prettify Render markdown responses and code with syntax highlighting and formatting
+--stream-prettify Enable streaming with markdown rendering (automatically uses Rich renderer)
+--renderer {auto,rich,glow} Select which markdown renderer to use with --prettify (auto, rich, or glow)
+
+Git Commit Message Options::
+
+--rec-chunk Process large diffs in chunks with recursive analysis if needed
+--diff [FILE] Use diff from specified file instead of staged changes. If used without a path,
+uses the path from CLI config.
+--chunk-size CHUNK_SIZE Number of lines per chunk when chunking is enabled (default: 200)
+--analyses-chunk-size ANALYSES_CHUNK_SIZE
+Number of lines per chunk when recursively chunking analyses (default: 200)
+--max-msg-lines MAX_MSG_LINES Maximum number of lines in commit message before condensing (default: 20)
+--max-recursion-depth MAX_RECURSION_DEPTH
+Maximum recursion depth for commit message condensing (default: 3)
+
+Modes (mutually exclusive)::
+
+-i, --interactive Start an interactive chat session
+-s, --shell Generate and execute shell commands
+-c, --code Generate code
+-t, --text Enter multi-line text input (submit with Ctrl+D)
+-p, --pipe Read from stdin and use content with prompt. Use {} in prompt as placeholder
+for stdin content
+-r, --rewrite Rewrite text from stdin to be more natural while preserving tone and meaning
+-g, --gitcommsg Generate AI-powered git commit messages from staged changes or diff file
+```
+
+> **Note**: For better visualization of conventional commit messages on GitHub, you can use the [GitHub Commit Labels](https://greasyfork.org/en/scripts/526153-github-commit-labels) userscript, which adds colorful labels to your commits.
 
 For a complete reference of all available options, see the [CLI Usage Guide](https://nazdridoy.github.io/ngpt/usage/cli_usage.html).
 
 ### CLI Configuration
 
-NGPT offers a CLI configuration system that allows you to set default values for command-line options:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-ngpt --cli-config
+NGPT offers a CLI configuration system that allows you to set default values for command-line options. This is especially useful when you:
+
+- Repeatedly use the same provider or model
+- Have preferred settings for specific tasks
+- Want to create different workflows based on context
+
+For example, setting your preferred language for code generation or temperature value means you won't have to specify these parameters each time:
+
+```console
+
+❯ ngpt --cli-config help
+
+CLI Configuration Help:
+Command syntax:
+ngpt --cli-config help - Show this help message
+ngpt --cli-config set OPTION VALUE - Set a default value for OPTION
+ngpt --cli-config get OPTION - Get the current value of OPTION
+ngpt --cli-config get - Show all CLI configuration settings
+ngpt --cli-config unset OPTION - Remove OPTION from configuration
+ngpt --cli-config list - List all available options
+
+Available options:
+General options (all modes):
+config-index - int (default: 0) [exclusive with: provider]
+log - str
+max_tokens - int
+no-stream - bool (default: False) [exclusive with: prettify, stream-prettify]
+preprompt - str
+prettify - bool (default: False) [exclusive with: no-stream, stream-prettify]
+provider - str [exclusive with: config-index]
+renderer - str (default: auto)
+stream-prettify - bool (default: False) [exclusive with: no-stream, prettify]
+temperature - float (default: 0.7)
+top_p - float (default: 1.0)
+web-search - bool (default: False)
+
+Options for Code generation mode:
+language - str (default: python)
+
+Options for Git commit message mode:
+analyses-chunk-size - int (default: 200)
+chunk-size - int (default: 200)
+diff - str
+max-msg-lines - int (default: 20)
+max-recursion-depth - int (default: 3)
+rec-chunk - bool (default: False)
+
+Example usage:
+ngpt --cli-config set language java - Set default language to java for code generation
+ngpt --cli-config set provider Gemini - Set Gemini as your default provider
+ngpt --cli-config set temperature 0.9 - Set default temperature to 0.9
+ngpt --cli-config set no-stream true - Disable streaming by default
+ngpt --cli-config set recursive-chunk true - Enable recursive chunking for git commit messages
+ngpt --cli-config set diff /path/to/file.diff - Set default diff file for git commit messages
+ngpt --cli-config get temperature - Check the current temperature setting
+ngpt --cli-config get - Show all current CLI settings
+ngpt --cli-config unset language - Remove language setting
+
+Notes:
+- CLI configuration is stored in:
+• Linux: ~/.config/ngpt/ngpt-cli.conf
+• macOS: ~/Library/Application Support/ngpt/ngpt-cli.conf
+• Windows: %APPDATA%\ngpt\ngpt-cli.conf
+- Settings are applied based on context (e.g., language only applies to code generation mode)
+- Command-line arguments always override CLI configuration
+- Some options are mutually exclusive and will not be applied together
 
-# List all available options
-ngpt --cli-config list
-
-# Show help information
-ngpt --cli-config help
-```
-
-Key features of CLI configuration:
-- **Context-Aware**: Settings are applied based on the current command mode (e.g., `language` only applies in code generation mode `-c`).
-- **Priority**: When determining option values, NGPT uses the following priority order (highest to lowest):
-1. Command-line arguments
-2. Environment variables
-3. CLI configuration (ngpt-cli.conf)
-4. Main configuration file (ngpt.conf)
-5. Default values
-- **Mutual Exclusivity**: For options like `no-stream`, `prettify`, and `stream-prettify`, setting one to `True` automatically sets the others to `False` in the configuration file, ensuring consistency.
-- **Smart Selection**: The `provider` setting is used to select which configuration profile to use, offering a persistent way to select your preferred API.
-
-Available options include:
-- General options (all modes): `provider`, `temperature`, `top_p`, `max_tokens`, `preprompt`, `renderer`, `config-index`, `web-search`
-- Mode-specific options: `language` (code mode only), `log` (interactive and text modes)
-- Mutually exclusive options: `no-stream`, `prettify`, `stream-prettify`
-
-#### Practical Examples
-
-```bash
-# Set Gemini as your default provider
-ngpt --cli-config set provider Gemini
-# Now you can run commands without specifying --provider
-ngpt "Explain quantum computing"
-
-# Configure code generation for TypeScript
-ngpt --cli-config set language typescript
-# Now in code mode, TypeScript will be used by default
-ngpt -c "Write a function to sort an array"
-
-# Set a higher temperature for more creative responses
-ngpt --cli-config set temperature 0.9
 ```
 
-The CLI configuration is stored in:
-- Linux: `~/.config/ngpt/ngpt-cli.conf`
-- macOS: `~/Library/Application Support/ngpt/ngpt-cli.conf`
-- Windows: `%APPDATA%\ngpt\ngpt-cli.conf`
-
 For more details, see the [CLI Configuration Guide](https://nazdridoy.github.io/ngpt/usage/cli_config.html).
 
 ### Interactive Configuration
@@ -642,4 +688,4 @@ Please check the [CONTRIBUTING.md](CONTRIBUTING.md) file for detailed guidelines
 
 ## License
 
-This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
+This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
{ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/RECORD
CHANGED
@@ -2,26 +2,26 @@ ngpt/__init__.py,sha256=kpKhViLakwMdHZkuLht2vWcjt0uD_5gR33gvMhfXr6w,664
 ngpt/__main__.py,sha256=j3eFYPOtCCFBOGh7NK5IWEnADnTMMSEB9GLyIDoW724,66
 ngpt/client.py,sha256=rLgDPmJe8_yi13-XUiHJ45z54rJVrupxWmeb-fQZGF4,15129
 ngpt/cli/__init__.py,sha256=hebbDSMGiOd43YNnQP67uzr67Ue6rZPwm2czynr5iZY,43
-ngpt/cli/args.py,sha256=
+ngpt/cli/args.py,sha256=XQvofZs_WkbgUto3Dbx7Yw-AmPAQHh8kdHUe3uWy8w4,11382
 ngpt/cli/config_manager.py,sha256=NQQcWnjUppAAd0s0p9YAf8EyKS1ex5-0EB4DvKdB4dk,3662
 ngpt/cli/formatters.py,sha256=HBYGlx_7eoAKyzfy0Vq5L0yn8yVKjngqYBukMmXCcz0,9401
 ngpt/cli/interactive.py,sha256=oLflfYVSX_NdYb5D8BsEeYr2m_6yWw06Rw-BS5tLqkI,12923
-ngpt/cli/main.py,sha256=
+ngpt/cli/main.py,sha256=rIGbsr1wll4fJhAmPx8C9fCJYlhKijdHqal9e3NC2I8,28908
 ngpt/cli/renderers.py,sha256=yYt3b_cWUYckfgLQ1wkEiQCnq5v5m7hfn5PWxHJzR9Y,12422
 ngpt/cli/ui.py,sha256=m8qtd4cCSHBGHPUlHVdBEfun1G1Se4vLKTSgnS7QOKE,6775
 ngpt/cli/modes/__init__.py,sha256=R3aO662RIzWEOvr3moTrEI8Tpg0zDDyMGGh1-OxiRgM,285
-ngpt/cli/modes/chat.py,sha256
+ngpt/cli/modes/chat.py,sha256=ULSIFach3Namb3FQBhgACxhHXME-fX7bLBDum9JQfks,5699
 ngpt/cli/modes/code.py,sha256=1EeiooyNe1jvTyNu6r0J9xY34QXb2uBQKPeNca-DrRI,5734
-ngpt/cli/modes/gitcommsg.py,sha256=
+ngpt/cli/modes/gitcommsg.py,sha256=rsfMoeOupmNp-5p5fsMSPAf18BbzXWq-4PF2HjEz6SY,46991
 ngpt/cli/modes/rewrite.py,sha256=iEMn6i6cuaxOiKq886Lsh961riCmPj5K1mXVQeT2Z7k,10112
 ngpt/cli/modes/shell.py,sha256=QkprnOxMMTg2v5DIwcofDnnr3JPNfuk-YgSQaae5Xps,3311
 ngpt/cli/modes/text.py,sha256=z08xaW2r0XzyunUzBhwZKqnwPjyRol_CSazjLygmLRs,4514
 ngpt/utils/__init__.py,sha256=E46suk2-QgYBI0Qrs6WXOajOUOebF3ETAFY7ah8DTWs,942
-ngpt/utils/cli_config.py,sha256=
+ngpt/utils/cli_config.py,sha256=IlHnOEEGpLoGZInynM778wgpxLVcJ_STKWxg2Ypvir4,11196
 ngpt/utils/config.py,sha256=WYOk_b1eiYjo6hpV3pfXr2RjqhOnmKqwZwKid1T41I4,10363
 ngpt/utils/log.py,sha256=f1jg2iFo35PAmsarH8FVL_62plq4VXH0Mu2QiP6RJGw,15934
-ngpt-
-ngpt-
-ngpt-
-ngpt-
-ngpt-
+ngpt-3.0.1.dist-info/METADATA,sha256=6YOossV8zjUDK_cEn0UEE05frHrzsz4F46-tJ_lGCWk,28821
+ngpt-3.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ngpt-3.0.1.dist-info/entry_points.txt,sha256=SqAAvLhMrsEpkIr4YFRdUeyuXQ9o0IBCeYgE6AVojoI,44
+ngpt-3.0.1.dist-info/licenses/LICENSE,sha256=mQkpWoADxbHqE0HRefYLJdm7OpdrXBr3vNv5bZ8w72M,1065
+ngpt-3.0.1.dist-info/RECORD,,
{ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/WHEEL
File without changes
{ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/entry_points.txt
File without changes
{ngpt-2.16.7.dist-info → ngpt-3.0.1.dist-info}/licenses/LICENSE
File without changes