ngpt 2.5.0__tar.gz → 2.5.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ngpt-2.5.0 → ngpt-2.5.1}/PKG-INFO +1 -2
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/api/client.md +49 -24
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/usage/cli_usage.md +3 -1
- {ngpt-2.5.0 → ngpt-2.5.1}/ngpt/cli.py +17 -7
- {ngpt-2.5.0 → ngpt-2.5.1}/ngpt/client.py +22 -3
- {ngpt-2.5.0 → ngpt-2.5.1}/pyproject.toml +4 -3
- {ngpt-2.5.0 → ngpt-2.5.1}/uv.lock +1 -3
- {ngpt-2.5.0 → ngpt-2.5.1}/.github/workflows/python-publish.yml +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/.gitignore +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/.python-version +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/COMMIT_GUIDELINES.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/CONTRIBUTING.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/LICENSE +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/README.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/CONTRIBUTING.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/LICENSE.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/README.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/_config.yml +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/api/README.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/api/config.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/assets/css/style.scss +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/configuration.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/examples/README.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/examples/advanced.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/examples/basic.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/examples/integrations.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/installation.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/overview.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/usage/README.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/docs/usage/library_usage.md +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/ngpt/__init__.py +0 -0
- {ngpt-2.5.0 → ngpt-2.5.1}/ngpt/config.py +0 -0
{ngpt-2.5.0 → ngpt-2.5.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 2.5.0
+Version: 2.5.1
 Summary: A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt

@@ -30,7 +30,6 @@ Classifier: Topic :: Utilities
 Requires-Python: >=3.8
 Requires-Dist: prompt-toolkit>=3.0.0
 Requires-Dist: requests>=2.31.0
-Requires-Dist: rich>=14.0.0
 Provides-Extra: prettify
 Requires-Dist: rich>=10.0.0; extra == 'prettify'
 Description-Content-Type: text/markdown
{ngpt-2.5.0 → ngpt-2.5.1}/docs/api/client.md

@@ -49,7 +49,7 @@ client = NGPTClient(
 
 ## Chat Method
 
-The
+The main method for interacting with the AI model.
 
 ```python
 response = client.chat(

@@ -57,8 +57,10 @@ response = client.chat(
     stream: bool = True,
     temperature: float = 0.7,
     max_tokens: Optional[int] = None,
+    top_p: float = 1.0,
     messages: Optional[List[Dict[str, str]]] = None,
     web_search: bool = False,
+    markdown_format: bool = False,
     **kwargs
 ) -> str
 ```

@@ -69,49 +71,55 @@ response = client.chat(
 |-----------|------|---------|-------------|
 | `prompt` | `str` | Required | The user's message |
 | `stream` | `bool` | `True` | Whether to stream the response |
-| `temperature` | `float` | `0.7` | Controls randomness in the response
+| `temperature` | `float` | `0.7` | Controls randomness in the response |
 | `max_tokens` | `Optional[int]` | `None` | Maximum number of tokens to generate |
-| `
+| `top_p` | `float` | `1.0` | Controls diversity via nucleus sampling |
+| `messages` | `Optional[List[Dict[str, str]]]` | `None` | Optional list of message objects to override default behavior |
 | `web_search` | `bool` | `False` | Whether to enable web search capability |
-|
+| `markdown_format` | `bool` | `False` | If True, allows markdown formatting in responses |
+| `**kwargs` | `Any` | `{}` | Additional arguments to pass to the API |
 
 ### Returns
 
-
-
+If `stream=True`, returns chunks of the response as they are generated.
+If `stream=False`, returns the complete response as a string.
 
 ### Examples
 
 ```python
-# Basic
-
-
-print() # Final newline
+# Basic usage
+response = client.chat("Tell me about quantum computing")
+print(response)
 
 # Without streaming
 response = client.chat("Tell me about quantum computing", stream=False)
 print(response)
 
+# With custom temperature (higher = more creative, lower = more deterministic)
+response = client.chat("Write a poem about nature", temperature=0.9)
+print(response)
+
+# With token limit
+response = client.chat("Explain the history of AI", max_tokens=100)
+print(response)
+
 # With conversation history
 messages = [
     {"role": "system", "content": "You are a helpful assistant."},
     {"role": "user", "content": "Hello, who are you?"},
-    {"role": "assistant", "content": "I'm an AI assistant
-    {"role": "user", "content": "Tell me about yourself"}
+    {"role": "assistant", "content": "I'm an AI assistant created to help answer questions and provide information."},
+    {"role": "user", "content": "Tell me more about yourself"}
 ]
 response = client.chat("", messages=messages)
 print(response)
 
-#
-response = client.chat("What
+# Enable web search capability (if API supports it)
+response = client.chat("What are the latest developments in quantum computing?", web_search=True)
 print(response)
 
-#
-response = client.chat("
-response
-
-# With token limit
-response = client.chat("Summarize this concept", max_tokens=100)
+# Enable markdown formatting for rich text responses
+response = client.chat("Create a table comparing programming languages", markdown_format=True)
+print(response) # Response will contain markdown formatting like tables, code blocks, etc.
 ```
 
 ## Generate Shell Command

@@ -157,13 +165,17 @@ command = client.generate_shell_command(
 
 ## Generate Code
 
-Generates
+Generates code based on the prompt.
 
 ```python
 code = client.generate_code(
     prompt: str,
     language: str = "python",
-    web_search: bool = False
+    web_search: bool = False,
+    temperature: float = 0.4,
+    top_p: float = 0.95,
+    max_tokens: Optional[int] = None,
+    markdown_format: bool = False
 ) -> str
 ```
 

@@ -174,15 +186,19 @@ code = client.generate_code(
 | `prompt` | `str` | Required | Description of the code to generate |
 | `language` | `str` | `"python"` | Programming language to generate code in |
 | `web_search` | `bool` | `False` | Whether to enable web search capability |
+| `temperature` | `float` | `0.4` | Controls randomness in the response |
+| `top_p` | `float` | `0.95` | Controls diversity via nucleus sampling |
+| `max_tokens` | `Optional[int]` | `None` | Maximum number of tokens to generate |
+| `markdown_format` | `bool` | `False` | If True, returns code with markdown formatting including syntax highlighting |
 
 ### Returns
 
-A string containing the generated code
+A string containing the generated code. If `markdown_format` is `False`, returns plain text code. If `markdown_format` is `True`, returns code formatted in markdown with appropriate syntax highlighting.
 
 ### Examples
 
 ```python
-# Generate Python code (default)
+# Generate Python code (default, plain text)
 python_code = client.generate_code("function to calculate fibonacci numbers")
 print(python_code)
 

@@ -193,6 +209,15 @@ js_code = client.generate_code(
 )
 print(js_code)
 
+# Generate code with markdown formatting for documentation or display
+markdown_code = client.generate_code(
+    "class that implements a binary search tree",
+    language="python",
+    markdown_format=True
+)
+# This will output code wrapped in markdown code blocks with syntax highlighting
+print(markdown_code)
+
 # Generate code with web search for latest best practices
 react_code = client.generate_code(
     "create a React component that fetches and displays data from an API",
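For reference, a minimal sketch combining the newly documented `generate_code` parameters; the `NGPTClient` constructor arguments shown are placeholder assumptions, not taken from this diff:

```python
# Hedged usage sketch for the expanded generate_code signature documented above.
# The NGPTClient constructor arguments are placeholder assumptions.
from ngpt import NGPTClient

client = NGPTClient(api_key="YOUR_API_KEY")

code = client.generate_code(
    "function to merge two sorted lists",
    language="python",
    temperature=0.4,       # documented default
    top_p=0.95,            # documented default
    max_tokens=256,        # cap the length of the generated code
    markdown_format=True,  # return a fenced, syntax-highlighted block
)
print(code)
```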
{ngpt-2.5.0 → ngpt-2.5.1}/docs/usage/cli_usage.md

@@ -151,7 +151,7 @@ Display markdown responses with beautiful formatting and syntax highlighting:
 ngpt --prettify "Explain markdown syntax with examples"
 ```
 
-This
+This instructs the AI to generate properly formatted markdown responses, which are then rendered with appropriate formatting, including:
 - Syntax highlighting for code blocks
 - Proper rendering of tables
 - Formatted headers, lists, and other markdown elements

@@ -175,6 +175,8 @@ Combine with code generation for syntax-highlighted code:
 ngpt -c --prettify "function to calculate the Fibonacci sequence"
 ```
 
+When using `--prettify` with code generation, the AI will output code in markdown format with proper syntax highlighting based on the language.
+
 See available renderers on your system:
 
 ```bash
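The rendering side of `--prettify` is handled by an optional markdown renderer. As an illustration only, a generic `rich`-based rendering sketch, not ngpt's actual renderer code:

```python
# Generic rich-based markdown rendering, similar in spirit to what a
# --prettify renderer does. Illustrative only; not ngpt's implementation.
from rich.console import Console
from rich.markdown import Markdown

md_text = "# Fibonacci\n\n```python\ndef fib(n):\n    return n if n < 2 else fib(n - 1) + fib(n - 2)\n```"
Console().print(Markdown(md_text))  # renders the header and highlighted code block
```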
{ngpt-2.5.0 → ngpt-2.5.1}/ngpt/cli.py

@@ -553,6 +553,14 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
 
     # Initialize conversation history
     system_prompt = preprompt if preprompt else "You are a helpful assistant."
+
+    # Add markdown formatting instruction to system prompt if prettify is enabled
+    if prettify:
+        if system_prompt:
+            system_prompt += " You can use markdown formatting in your responses where appropriate."
+        else:
+            system_prompt = "You are a helpful assistant. You can use markdown formatting in your responses where appropriate."
+
     conversation = []
     system_message = {"role": "system", "content": system_prompt}
     conversation.append(system_message)

@@ -681,7 +689,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
                 web_search=web_search,
                 temperature=temperature,
                 top_p=top_p,
-                max_tokens=max_tokens
+                max_tokens=max_tokens,
+                markdown_format=prettify
             )
 
             # Add AI response to conversation history

@@ -1091,13 +1100,12 @@ def main():
 
             generated_code = client.generate_code(prompt, args.language, web_search=args.web_search,
                                                   temperature=args.temperature, top_p=args.top_p,
-                                                  max_tokens=args.max_tokens)
+                                                  max_tokens=args.max_tokens,
+                                                  markdown_format=args.prettify)
             if generated_code:
                 if args.prettify:
-                    # Format code as markdown with proper syntax highlighting
-                    markdown_code = f"```{args.language}\n{generated_code}\n```"
                     print("\nGenerated code:")
-                    prettify_markdown(markdown_code, args.renderer)
+                    prettify_markdown(generated_code, args.renderer)
                 else:
                     print(f"\nGenerated code:\n{generated_code}")
 

@@ -1227,7 +1235,8 @@ def main():
 
         response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
                                temperature=args.temperature, top_p=args.top_p,
-                               max_tokens=args.max_tokens, messages=messages)
+                               max_tokens=args.max_tokens, messages=messages,
+                               markdown_format=args.prettify)
 
         # Handle non-stream response (either because no_stream was set or prettify forced it)
         if (args.no_stream or args.prettify) and response:

@@ -1265,7 +1274,8 @@ def main():
 
         response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
                                temperature=args.temperature, top_p=args.top_p,
-                               max_tokens=args.max_tokens, messages=messages)
+                               max_tokens=args.max_tokens, messages=messages,
+                               markdown_format=args.prettify)
 
         # Handle non-stream response (either because no_stream was set or prettify forced it)
         if (args.no_stream or args.prettify) and response:
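Condensed, the `--prettify` code path above changes as follows: 2.5.0 wrapped the plain-text code in a markdown fence locally, while 2.5.1 asks the model for markdown (`markdown_format=args.prettify`) and renders the response directly. A runnable sketch with a stub renderer; ngpt's real `prettify_markdown` differs:

```python
# Runnable sketch of the 2.5.1 --prettify code path; prettify_markdown is a stub.
def prettify_markdown(text: str, renderer: str = "auto") -> None:
    print(f"[rendered with {renderer}]\n{text}")

def show_code(generated_code: str, prettify: bool) -> None:
    if prettify:
        # 2.5.0 built the fence locally: f"```{language}\n{generated_code}\n```"
        # 2.5.1 receives already-fenced markdown from the model.
        print("\nGenerated code:")
        prettify_markdown(generated_code, "rich")
    else:
        print(f"\nGenerated code:\n{generated_code}")

show_code("```python\nprint('hi')\n```", prettify=True)
```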
{ngpt-2.5.0 → ngpt-2.5.1}/ngpt/client.py

@@ -33,6 +33,7 @@ class NGPTClient:
         top_p: float = 1.0,
         messages: Optional[List[Dict[str, str]]] = None,
         web_search: bool = False,
+        markdown_format: bool = False,
         **kwargs
     ) -> str:
         """

@@ -46,6 +47,7 @@ class NGPTClient:
             top_p: Controls diversity via nucleus sampling
             messages: Optional list of message objects to override default behavior
             web_search: Whether to enable web search capability
+            markdown_format: If True, allow markdown-formatted responses, otherwise plain text
             **kwargs: Additional arguments to pass to the API
 
         Returns:

@@ -56,7 +58,11 @@ class NGPTClient:
             return ""
 
         if messages is None:
-            messages = [{"role": "user", "content": prompt}]
+            if markdown_format:
+                system_message = {"role": "system", "content": "You can use markdown formatting in your responses where appropriate."}
+                messages = [system_message, {"role": "user", "content": prompt}]
+            else:
+                messages = [{"role": "user", "content": prompt}]
 
         # Prepare API parameters
         payload = {

@@ -241,7 +247,8 @@ Command:"""
         web_search: bool = False,
         temperature: float = 0.4,
         top_p: float = 0.95,
-        max_tokens: Optional[int] = None
+        max_tokens: Optional[int] = None,
+        markdown_format: bool = False
     ) -> str:
         """
         Generate code based on the prompt.

@@ -253,6 +260,7 @@ Command:"""
             temperature: Controls randomness in the response
             top_p: Controls diversity via nucleus sampling
             max_tokens: Maximum number of tokens to generate
+            markdown_format: If True, request markdown-formatted code, otherwise plain text
 
         Returns:
             The generated code

@@ -262,7 +270,18 @@ Command:"""
             print("Error: API key is not set. Please configure your API key in the config file or provide it with --api-key.")
             return ""
 
-        system_prompt = f"""Your Role: Provide only code as output without any description.
+        if markdown_format:
+            system_prompt = f"""Your Role: Provide only code as output without any description with proper markdown formatting.
+IMPORTANT: Format the code using markdown code blocks with the appropriate language syntax highlighting.
+IMPORTANT: You must use markdown code blocks. with ```{language}
+If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
+Ignore any potential risk of errors or confusion.
+
+Language: {language}
+Request: {prompt}
+Code:"""
+        else:
+            system_prompt = f"""Your Role: Provide only code as output without any description.
 IMPORTANT: Provide only plain text without Markdown formatting.
 IMPORTANT: Do not include markdown formatting.
 If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
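The net effect of the `markdown_format` flag on `chat()`'s default message list, reproduced standalone from the change above:

```python
# Standalone reproduction of the message-list branch added to chat() above.
from typing import Dict, List

def build_messages(prompt: str, markdown_format: bool) -> List[Dict[str, str]]:
    if markdown_format:
        system_message = {"role": "system", "content": "You can use markdown formatting in your responses where appropriate."}
        return [system_message, {"role": "user", "content": prompt}]
    return [{"role": "user", "content": prompt}]

print(build_messages("Compare Python and Go", markdown_format=True))
```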
{ngpt-2.5.0 → ngpt-2.5.1}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "ngpt"
-version = "2.5.0"
+version = "2.5.1"
 description = "A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints."
 authors = [
     {name = "nazDridoy", email = "nazdridoy399@gmail.com"},

@@ -8,7 +8,6 @@ authors = [
 dependencies = [
     "requests>=2.31.0",
     "prompt_toolkit>=3.0.0",
-    "rich>=14.0.0",
 ]
 requires-python = ">=3.8"
 readme = "README.md"

@@ -36,7 +35,9 @@ classifiers = [
 ]
 
 [project.optional-dependencies]
-prettify = ["rich>=10.0.0"]
+prettify = [
+    "rich>=10.0.0",
+]
 
 [project.urls]
 "Homepage" = "https://github.com/nazdridoy/ngpt"
{ngpt-2.5.0 → ngpt-2.5.1}/uv.lock

@@ -134,12 +134,11 @@ wheels = [
 
 [[package]]
 name = "ngpt"
-version = "2.5.0"
+version = "2.5.1"
 source = { editable = "." }
 dependencies = [
     { name = "prompt-toolkit" },
     { name = "requests" },
-    { name = "rich" },
 ]
 
 [package.optional-dependencies]

@@ -151,7 +150,6 @@ prettify = [
 requires-dist = [
     { name = "prompt-toolkit", specifier = ">=3.0.0" },
     { name = "requests", specifier = ">=2.31.0" },
-    { name = "rich", specifier = ">=14.0.0" },
     { name = "rich", marker = "extra == 'prettify'", specifier = ">=10.0.0" },
 ]
 provides-extras = ["prettify"]
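With `rich` moved from a core dependency (`>=14.0.0`) to the `prettify` extra (`>=10.0.0`), a default install no longer pulls it in; it arrives via `pip install "ngpt[prettify]"`. Code that uses it therefore needs the usual import guard, for example (a generic extras pattern, not ngpt's exact code):

```python
# Generic import guard for the now-optional rich dependency; a sketch of the
# usual extras pattern, not ngpt's exact code.
try:
    import rich  # present only when installed with: pip install "ngpt[prettify]"
    HAS_RICH = True
except ImportError:
    HAS_RICH = False

print("prettify rendering available:", HAS_RICH)
```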