ngpt 2.5.0__tar.gz → 2.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {ngpt-2.5.0 → ngpt-2.6.0}/PKG-INFO +15 -5
  2. {ngpt-2.5.0 → ngpt-2.6.0}/README.md +11 -0
  3. {ngpt-2.5.0 → ngpt-2.6.0}/docs/api/client.md +49 -24
  4. {ngpt-2.5.0 → ngpt-2.6.0}/docs/installation.md +20 -6
  5. {ngpt-2.5.0 → ngpt-2.6.0}/docs/usage/cli_usage.md +3 -1
  6. {ngpt-2.5.0 → ngpt-2.6.0}/ngpt/cli.py +23 -11
  7. {ngpt-2.5.0 → ngpt-2.6.0}/ngpt/client.py +22 -3
  8. {ngpt-2.5.0 → ngpt-2.6.0}/pyproject.toml +5 -4
  9. {ngpt-2.5.0 → ngpt-2.6.0}/uv.lock +6 -8
  10. {ngpt-2.5.0 → ngpt-2.6.0}/.github/workflows/python-publish.yml +0 -0
  11. {ngpt-2.5.0 → ngpt-2.6.0}/.gitignore +0 -0
  12. {ngpt-2.5.0 → ngpt-2.6.0}/.python-version +0 -0
  13. {ngpt-2.5.0 → ngpt-2.6.0}/COMMIT_GUIDELINES.md +0 -0
  14. {ngpt-2.5.0 → ngpt-2.6.0}/CONTRIBUTING.md +0 -0
  15. {ngpt-2.5.0 → ngpt-2.6.0}/LICENSE +0 -0
  16. {ngpt-2.5.0 → ngpt-2.6.0}/docs/CONTRIBUTING.md +0 -0
  17. {ngpt-2.5.0 → ngpt-2.6.0}/docs/LICENSE.md +0 -0
  18. {ngpt-2.5.0 → ngpt-2.6.0}/docs/README.md +0 -0
  19. {ngpt-2.5.0 → ngpt-2.6.0}/docs/_config.yml +0 -0
  20. {ngpt-2.5.0 → ngpt-2.6.0}/docs/api/README.md +0 -0
  21. {ngpt-2.5.0 → ngpt-2.6.0}/docs/api/config.md +0 -0
  22. {ngpt-2.5.0 → ngpt-2.6.0}/docs/assets/css/style.scss +0 -0
  23. {ngpt-2.5.0 → ngpt-2.6.0}/docs/configuration.md +0 -0
  24. {ngpt-2.5.0 → ngpt-2.6.0}/docs/examples/README.md +0 -0
  25. {ngpt-2.5.0 → ngpt-2.6.0}/docs/examples/advanced.md +0 -0
  26. {ngpt-2.5.0 → ngpt-2.6.0}/docs/examples/basic.md +0 -0
  27. {ngpt-2.5.0 → ngpt-2.6.0}/docs/examples/integrations.md +0 -0
  28. {ngpt-2.5.0 → ngpt-2.6.0}/docs/overview.md +0 -0
  29. {ngpt-2.5.0 → ngpt-2.6.0}/docs/usage/README.md +0 -0
  30. {ngpt-2.5.0 → ngpt-2.6.0}/docs/usage/library_usage.md +0 -0
  31. {ngpt-2.5.0 → ngpt-2.6.0}/ngpt/__init__.py +0 -0
  32. {ngpt-2.5.0 → ngpt-2.6.0}/ngpt/config.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ngpt
3
- Version: 2.5.0
3
+ Version: 2.6.0
4
4
  Summary: A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints.
5
5
  Project-URL: Homepage, https://github.com/nazdridoy/ngpt
6
6
  Project-URL: Repository, https://github.com/nazdridoy/ngpt
@@ -28,11 +28,10 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
28
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
29
29
  Classifier: Topic :: Utilities
30
30
  Requires-Python: >=3.8
31
- Requires-Dist: prompt-toolkit>=3.0.0
32
31
  Requires-Dist: requests>=2.31.0
33
- Requires-Dist: rich>=14.0.0
34
- Provides-Extra: prettify
35
- Requires-Dist: rich>=10.0.0; extra == 'prettify'
32
+ Provides-Extra: full
33
+ Requires-Dist: prompt-toolkit>=3.0.0; extra == 'full'
34
+ Requires-Dist: rich>=10.0.0; extra == 'full'
36
35
  Description-Content-Type: text/markdown
37
36
 
38
37
  # nGPT
@@ -67,6 +66,9 @@ A lightweight Python CLI and library for interacting with OpenAI-compatible APIs
67
66
  # Install
68
67
  pip install ngpt
69
68
 
69
+ # Install with additional features
70
+ pip install "ngpt[full]"
71
+
70
72
  # Chat with default settings
71
73
  ngpt "Tell me about quantum computing"
72
74
 
@@ -137,11 +139,19 @@ Key documentation sections:
137
139
  ## Installation
138
140
 
139
141
  ```bash
142
+ # Basic installation (minimal dependencies)
140
143
  pip install ngpt
144
+
145
+ # Full installation with all features (recommended)
146
+ pip install "ngpt[full]"
141
147
  ```
142
148
 
143
149
  Requires Python 3.8 or newer.
144
150
 
151
+ The full installation includes:
152
+ - Enhanced markdown rendering with syntax highlighting
153
+ - Improved interactive input experience with multiline editing
154
+
145
155
  For detailed installation instructions, see the [Installation Guide](https://nazdridoy.github.io/ngpt/installation.html).
146
156
 
147
157
  ## Usage
@@ -30,6 +30,9 @@ A lightweight Python CLI and library for interacting with OpenAI-compatible APIs
30
30
  # Install
31
31
  pip install ngpt
32
32
 
33
+ # Install with additional features
34
+ pip install "ngpt[full]"
35
+
33
36
  # Chat with default settings
34
37
  ngpt "Tell me about quantum computing"
35
38
 
@@ -100,11 +103,19 @@ Key documentation sections:
100
103
  ## Installation
101
104
 
102
105
  ```bash
106
+ # Basic installation (minimal dependencies)
103
107
  pip install ngpt
108
+
109
+ # Full installation with all features (recommended)
110
+ pip install "ngpt[full]"
104
111
  ```
105
112
 
106
113
  Requires Python 3.8 or newer.
107
114
 
115
+ The full installation includes:
116
+ - Enhanced markdown rendering with syntax highlighting
117
+ - Improved interactive input experience with multiline editing
118
+
108
119
  For detailed installation instructions, see the [Installation Guide](https://nazdridoy.github.io/ngpt/installation.html).
109
120
 
110
121
  ## Usage
@@ -49,7 +49,7 @@ client = NGPTClient(
49
49
 
50
50
  ## Chat Method
51
51
 
52
- The primary method for interacting with the AI model.
52
+ The main method for interacting with the AI model.
53
53
 
54
54
  ```python
55
55
  response = client.chat(
@@ -57,8 +57,10 @@ response = client.chat(
57
57
  stream: bool = True,
58
58
  temperature: float = 0.7,
59
59
  max_tokens: Optional[int] = None,
60
+ top_p: float = 1.0,
60
61
  messages: Optional[List[Dict[str, str]]] = None,
61
62
  web_search: bool = False,
63
+ markdown_format: bool = False,
62
64
  **kwargs
63
65
  ) -> str
64
66
  ```
@@ -69,49 +71,55 @@ response = client.chat(
69
71
  |-----------|------|---------|-------------|
70
72
  | `prompt` | `str` | Required | The user's message |
71
73
  | `stream` | `bool` | `True` | Whether to stream the response |
72
- | `temperature` | `float` | `0.7` | Controls randomness in the response (0.0-1.0) |
74
+ | `temperature` | `float` | `0.7` | Controls randomness in the response |
73
75
  | `max_tokens` | `Optional[int]` | `None` | Maximum number of tokens to generate |
74
- | `messages` | `Optional[List[Dict[str, str]]]` | `None` | Optional list of message objects for conversation history |
76
+ | `top_p` | `float` | `1.0` | Controls diversity via nucleus sampling |
77
+ | `messages` | `Optional[List[Dict[str, str]]]` | `None` | Optional list of message objects to override default behavior |
75
78
  | `web_search` | `bool` | `False` | Whether to enable web search capability |
76
- | `**kwargs` | | | Additional arguments to pass to the API |
79
+ | `markdown_format` | `bool` | `False` | If True, allows markdown formatting in responses |
80
+ | `**kwargs` | `Any` | `{}` | Additional arguments to pass to the API |
77
81
 
78
82
  ### Returns
79
83
 
80
- - When `stream=False`: A string containing the complete response
81
- - When `stream=True`: A generator yielding response chunks that can be iterated over
84
+ If `stream=True`, returns chunks of the response as they are generated.
85
+ If `stream=False`, returns the complete response as a string.
82
86
 
83
87
  ### Examples
84
88
 
85
89
  ```python
86
- # Basic chat with streaming
87
- for chunk in client.chat("Tell me about quantum computing"):
88
- print(chunk, end="", flush=True)
89
- print() # Final newline
90
+ # Basic usage
91
+ response = client.chat("Tell me about quantum computing")
92
+ print(response)
90
93
 
91
94
  # Without streaming
92
95
  response = client.chat("Tell me about quantum computing", stream=False)
93
96
  print(response)
94
97
 
98
+ # With custom temperature (higher = more creative, lower = more deterministic)
99
+ response = client.chat("Write a poem about nature", temperature=0.9)
100
+ print(response)
101
+
102
+ # With token limit
103
+ response = client.chat("Explain the history of AI", max_tokens=100)
104
+ print(response)
105
+
95
106
  # With conversation history
96
107
  messages = [
97
108
  {"role": "system", "content": "You are a helpful assistant."},
98
109
  {"role": "user", "content": "Hello, who are you?"},
99
- {"role": "assistant", "content": "I'm an AI assistant. How can I help you today?"},
100
- {"role": "user", "content": "Tell me about yourself"}
110
+ {"role": "assistant", "content": "I'm an AI assistant created to help answer questions and provide information."},
111
+ {"role": "user", "content": "Tell me more about yourself"}
101
112
  ]
102
113
  response = client.chat("", messages=messages)
103
114
  print(response)
104
115
 
105
- # With web search
106
- response = client.chat("What's the latest news about AI?", web_search=True)
116
+ # Enable web search capability (if API supports it)
117
+ response = client.chat("What are the latest developments in quantum computing?", web_search=True)
107
118
  print(response)
108
119
 
109
- # With temperature control
110
- response = client.chat("Write a creative story", temperature=0.9) # More random
111
- response = client.chat("Explain how a CPU works", temperature=0.2) # More focused
112
-
113
- # With token limit
114
- response = client.chat("Summarize this concept", max_tokens=100)
120
+ # Enable markdown formatting for rich text responses
121
+ response = client.chat("Create a table comparing programming languages", markdown_format=True)
122
+ print(response) # Response will contain markdown formatting like tables, code blocks, etc.
115
123
  ```
116
124
 
117
125
  ## Generate Shell Command
@@ -157,13 +165,17 @@ command = client.generate_shell_command(
157
165
 
158
166
  ## Generate Code
159
167
 
160
- Generates clean code based on the prompt, without markdown formatting or explanations.
168
+ Generates code based on the prompt.
161
169
 
162
170
  ```python
163
171
  code = client.generate_code(
164
172
  prompt: str,
165
173
  language: str = "python",
166
- web_search: bool = False
174
+ web_search: bool = False,
175
+ temperature: float = 0.4,
176
+ top_p: float = 0.95,
177
+ max_tokens: Optional[int] = None,
178
+ markdown_format: bool = False
167
179
  ) -> str
168
180
  ```
169
181
 
@@ -174,15 +186,19 @@ code = client.generate_code(
174
186
  | `prompt` | `str` | Required | Description of the code to generate |
175
187
  | `language` | `str` | `"python"` | Programming language to generate code in |
176
188
  | `web_search` | `bool` | `False` | Whether to enable web search capability |
189
+ | `temperature` | `float` | `0.4` | Controls randomness in the response |
190
+ | `top_p` | `float` | `0.95` | Controls diversity via nucleus sampling |
191
+ | `max_tokens` | `Optional[int]` | `None` | Maximum number of tokens to generate |
192
+ | `markdown_format` | `bool` | `False` | If True, returns code with markdown formatting including syntax highlighting |
177
193
 
178
194
  ### Returns
179
195
 
180
- A string containing the generated code without any markdown formatting or explanations.
196
+ A string containing the generated code. If `markdown_format` is `False`, returns plain text code. If `markdown_format` is `True`, returns code formatted in markdown with appropriate syntax highlighting.
181
197
 
182
198
  ### Examples
183
199
 
184
200
  ```python
185
- # Generate Python code (default)
201
+ # Generate Python code (default, plain text)
186
202
  python_code = client.generate_code("function to calculate fibonacci numbers")
187
203
  print(python_code)
188
204
 
@@ -193,6 +209,15 @@ js_code = client.generate_code(
193
209
  )
194
210
  print(js_code)
195
211
 
212
+ # Generate code with markdown formatting for documentation or display
213
+ markdown_code = client.generate_code(
214
+ "class that implements a binary search tree",
215
+ language="python",
216
+ markdown_format=True
217
+ )
218
+ # This will output code wrapped in markdown code blocks with syntax highlighting
219
+ print(markdown_code)
220
+
196
221
  # Generate code with web search for latest best practices
197
222
  react_code = client.generate_code(
198
223
  "create a React component that fetches and displays data from an API",
@@ -6,11 +6,11 @@ There are several ways to install nGPT depending on your needs and environment.
6
6
 
7
7
  - Python 3.8 or newer
8
8
  - `requests` library (automatically installed as a dependency)
9
- - `prompt_toolkit` library (automatically installed as a dependency)
10
9
 
11
10
  ## Optional Dependencies
12
11
 
13
12
  - `rich` library - For enhanced markdown rendering with syntax highlighting
13
+ - `prompt_toolkit` library - For improved interactive input experience with multiline editing
14
14
 
15
15
  ## Installing from PyPI (Recommended)
16
16
 
@@ -20,18 +20,20 @@ The simplest way to install nGPT is through the Python Package Index (PyPI):
20
20
  pip install ngpt
21
21
  ```
22
22
 
23
- This will install the latest stable release of nGPT and all its dependencies.
23
+ This will install the latest stable release of nGPT with basic functionality.
24
24
 
25
- For markdown rendering capabilities, install with the prettify extra:
25
+ For additional capabilities like markdown rendering, syntax highlighting, and enhanced interactive input experience, install with the full extras:
26
26
 
27
27
  ```bash
28
- pip install ngpt[prettify]
28
+ pip install "ngpt[full]"
29
29
  ```
30
30
 
31
- Alternatively, you can install the optional dependency separately:
31
+ Note that quotes around the package name are required due to the square brackets.
32
+
33
+ Alternatively, you can install the optional dependencies separately:
32
34
 
33
35
  ```bash
34
- pip install rich
36
+ pip install rich prompt_toolkit
35
37
  ```
36
38
 
37
39
  ## Installing in a Virtual Environment
@@ -52,6 +54,9 @@ source ngpt-env/bin/activate
52
54
 
53
55
  # Install nGPT
54
56
  pip install ngpt
57
+
58
+ # Or with all features
59
+ pip install "ngpt[full]"
55
60
  ```
56
61
 
57
62
  ### Using conda
@@ -65,6 +70,9 @@ conda activate ngpt-env
65
70
 
66
71
  # Install nGPT
67
72
  pip install ngpt
73
+
74
+ # Or with all features
75
+ pip install "ngpt[full]"
68
76
  ```
69
77
 
70
78
  ## Installing from Source
@@ -80,6 +88,9 @@ cd ngpt
80
88
 
81
89
  # Install the package in development mode
82
90
  pip install -e .
91
+
92
+ # Or with all features
93
+ pip install -e ".[full]"
83
94
  ```
84
95
 
85
96
  ## Upgrading
@@ -88,6 +99,9 @@ To upgrade to the latest version:
88
99
 
89
100
  ```bash
90
101
  pip install --upgrade ngpt
102
+
103
+ # Or with all features
104
+ pip install --upgrade "ngpt[full]"
91
105
  ```
92
106
 
93
107
  ## Verifying the Installation
@@ -151,7 +151,7 @@ Display markdown responses with beautiful formatting and syntax highlighting:
151
151
  ngpt --prettify "Explain markdown syntax with examples"
152
152
  ```
153
153
 
154
- This renders the AI's response with proper markdown formatting, including:
154
+ This instructs the AI to generate properly formatted markdown responses, which are then rendered with appropriate formatting, including:
155
155
  - Syntax highlighting for code blocks
156
156
  - Proper rendering of tables
157
157
  - Formatted headers, lists, and other markdown elements
@@ -175,6 +175,8 @@ Combine with code generation for syntax-highlighted code:
175
175
  ngpt -c --prettify "function to calculate the Fibonacci sequence"
176
176
  ```
177
177
 
178
+ When using `--prettify` with code generation, the AI will output code in markdown format with proper syntax highlighting based on the language.
179
+
178
180
  See available renderers on your system:
179
181
 
180
182
  ```bash
@@ -115,10 +115,11 @@ def show_available_renderers():
115
115
  if HAS_RICH:
116
116
  print(f" {COLORS['green']}✓ Rich{COLORS['reset']} - Python library for terminal formatting (Recommended)")
117
117
  else:
118
- print(f" {COLORS['yellow']}✗ Rich{COLORS['reset']} - Not installed (pip install rich)")
118
+ print(f" {COLORS['yellow']}✗ Rich{COLORS['reset']} - Not installed (pip install \"ngpt[full]\" or pip install rich)")
119
119
 
120
120
  if not HAS_GLOW and not HAS_RICH:
121
121
  print(f"\n{COLORS['yellow']}To enable prettified markdown output, install one of the above renderers.{COLORS['reset']}")
122
+ print(f"{COLORS['yellow']}For Rich: pip install \"ngpt[full]\" or pip install rich{COLORS['reset']}")
122
123
  else:
123
124
  renderers = []
124
125
  if HAS_RICH:
@@ -148,11 +149,11 @@ def warn_if_no_markdown_renderer(renderer='auto'):
148
149
 
149
150
  if renderer == 'auto':
150
151
  print(f"{COLORS['yellow']}Warning: No markdown rendering library available.{COLORS['reset']}")
151
- print(f"{COLORS['yellow']}Install 'rich' package with: pip install rich{COLORS['reset']}")
152
+ print(f"{COLORS['yellow']}Install with: pip install \"ngpt[full]\"{COLORS['reset']}")
152
153
  print(f"{COLORS['yellow']}Or install 'glow' from https://github.com/charmbracelet/glow{COLORS['reset']}")
153
154
  elif renderer == 'rich':
154
155
  print(f"{COLORS['yellow']}Warning: Rich is not available.{COLORS['reset']}")
155
- print(f"{COLORS['yellow']}Install with: pip install rich{COLORS['reset']}")
156
+ print(f"{COLORS['yellow']}Install with: pip install \"ngpt[full]\" or pip install rich{COLORS['reset']}")
156
157
  elif renderer == 'glow':
157
158
  print(f"{COLORS['yellow']}Warning: Glow is not available.{COLORS['reset']}")
158
159
  print(f"{COLORS['yellow']}Install from https://github.com/charmbracelet/glow{COLORS['reset']}")
@@ -219,7 +220,8 @@ def prettify_markdown(text, renderer='auto'):
219
220
  # Use rich for rendering
220
221
  elif renderer == 'rich':
221
222
  if not HAS_RICH:
222
- print(f"{COLORS['yellow']}Warning: Rich is not available. Install with: pip install rich{COLORS['reset']}")
223
+ print(f"{COLORS['yellow']}Warning: Rich is not available.{COLORS['reset']}")
224
+ print(f"{COLORS['yellow']}Install with: pip install \"ngpt[full]\" or pip install rich{COLORS['reset']}")
223
225
  # Fall back to glow if available
224
226
  if HAS_GLOW:
225
227
  print(f"{COLORS['yellow']}Falling back to Glow renderer.{COLORS['reset']}")
@@ -553,6 +555,14 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
553
555
 
554
556
  # Initialize conversation history
555
557
  system_prompt = preprompt if preprompt else "You are a helpful assistant."
558
+
559
+ # Add markdown formatting instruction to system prompt if prettify is enabled
560
+ if prettify:
561
+ if system_prompt:
562
+ system_prompt += " You can use markdown formatting in your responses where appropriate."
563
+ else:
564
+ system_prompt = "You are a helpful assistant. You can use markdown formatting in your responses where appropriate."
565
+
556
566
  conversation = []
557
567
  system_message = {"role": "system", "content": system_prompt}
558
568
  conversation.append(system_message)
@@ -681,7 +691,8 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
681
691
  web_search=web_search,
682
692
  temperature=temperature,
683
693
  top_p=top_p,
684
- max_tokens=max_tokens
694
+ max_tokens=max_tokens,
695
+ markdown_format=prettify
685
696
  )
686
697
 
687
698
  # Add AI response to conversation history
@@ -1091,13 +1102,12 @@ def main():
1091
1102
 
1092
1103
  generated_code = client.generate_code(prompt, args.language, web_search=args.web_search,
1093
1104
  temperature=args.temperature, top_p=args.top_p,
1094
- max_tokens=args.max_tokens)
1105
+ max_tokens=args.max_tokens,
1106
+ markdown_format=args.prettify)
1095
1107
  if generated_code:
1096
1108
  if args.prettify:
1097
- # Format code as markdown with proper syntax highlighting
1098
- markdown_code = f"```{args.language}\n{generated_code}\n```"
1099
1109
  print("\nGenerated code:")
1100
- prettify_markdown(markdown_code, args.renderer)
1110
+ prettify_markdown(generated_code, args.renderer)
1101
1111
  else:
1102
1112
  print(f"\nGenerated code:\n{generated_code}")
1103
1113
 
@@ -1227,7 +1237,8 @@ def main():
1227
1237
 
1228
1238
  response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
1229
1239
  temperature=args.temperature, top_p=args.top_p,
1230
- max_tokens=args.max_tokens, messages=messages)
1240
+ max_tokens=args.max_tokens, messages=messages,
1241
+ markdown_format=args.prettify)
1231
1242
 
1232
1243
  # Handle non-stream response (either because no_stream was set or prettify forced it)
1233
1244
  if (args.no_stream or args.prettify) and response:
@@ -1265,7 +1276,8 @@ def main():
1265
1276
 
1266
1277
  response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
1267
1278
  temperature=args.temperature, top_p=args.top_p,
1268
- max_tokens=args.max_tokens, messages=messages)
1279
+ max_tokens=args.max_tokens, messages=messages,
1280
+ markdown_format=args.prettify)
1269
1281
 
1270
1282
  # Handle non-stream response (either because no_stream was set or prettify forced it)
1271
1283
  if (args.no_stream or args.prettify) and response:
@@ -33,6 +33,7 @@ class NGPTClient:
33
33
  top_p: float = 1.0,
34
34
  messages: Optional[List[Dict[str, str]]] = None,
35
35
  web_search: bool = False,
36
+ markdown_format: bool = False,
36
37
  **kwargs
37
38
  ) -> str:
38
39
  """
@@ -46,6 +47,7 @@ class NGPTClient:
46
47
  top_p: Controls diversity via nucleus sampling
47
48
  messages: Optional list of message objects to override default behavior
48
49
  web_search: Whether to enable web search capability
50
+ markdown_format: If True, allow markdown-formatted responses, otherwise plain text
49
51
  **kwargs: Additional arguments to pass to the API
50
52
 
51
53
  Returns:
@@ -56,7 +58,11 @@ class NGPTClient:
56
58
  return ""
57
59
 
58
60
  if messages is None:
59
- messages = [{"role": "user", "content": prompt}]
61
+ if markdown_format:
62
+ system_message = {"role": "system", "content": "You can use markdown formatting in your responses where appropriate."}
63
+ messages = [system_message, {"role": "user", "content": prompt}]
64
+ else:
65
+ messages = [{"role": "user", "content": prompt}]
60
66
 
61
67
  # Prepare API parameters
62
68
  payload = {
@@ -241,7 +247,8 @@ Command:"""
241
247
  web_search: bool = False,
242
248
  temperature: float = 0.4,
243
249
  top_p: float = 0.95,
244
- max_tokens: Optional[int] = None
250
+ max_tokens: Optional[int] = None,
251
+ markdown_format: bool = False
245
252
  ) -> str:
246
253
  """
247
254
  Generate code based on the prompt.
@@ -253,6 +260,7 @@ Command:"""
253
260
  temperature: Controls randomness in the response
254
261
  top_p: Controls diversity via nucleus sampling
255
262
  max_tokens: Maximum number of tokens to generate
263
+ markdown_format: If True, request markdown-formatted code, otherwise plain text
256
264
 
257
265
  Returns:
258
266
  The generated code
@@ -262,7 +270,18 @@ Command:"""
262
270
  print("Error: API key is not set. Please configure your API key in the config file or provide it with --api-key.")
263
271
  return ""
264
272
 
265
- system_prompt = f"""Your Role: Provide only code as output without any description.
273
+ if markdown_format:
274
+ system_prompt = f"""Your Role: Provide only code as output without any description with proper markdown formatting.
275
+ IMPORTANT: Format the code using markdown code blocks with the appropriate language syntax highlighting.
276
+ IMPORTANT: You must use markdown code blocks. with ```{language}
277
+ If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
278
+ Ignore any potential risk of errors or confusion.
279
+
280
+ Language: {language}
281
+ Request: {prompt}
282
+ Code:"""
283
+ else:
284
+ system_prompt = f"""Your Role: Provide only code as output without any description.
266
285
  IMPORTANT: Provide only plain text without Markdown formatting.
267
286
  IMPORTANT: Do not include markdown formatting.
268
287
  If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
@@ -1,14 +1,12 @@
1
1
  [project]
2
2
  name = "ngpt"
3
- version = "2.5.0"
3
+ version = "2.6.0"
4
4
  description = "A lightweight Python CLI and library for interacting with OpenAI-compatible APIs, supporting both official and self-hosted LLM endpoints."
5
5
  authors = [
6
6
  {name = "nazDridoy", email = "nazdridoy399@gmail.com"},
7
7
  ]
8
8
  dependencies = [
9
9
  "requests>=2.31.0",
10
- "prompt_toolkit>=3.0.0",
11
- "rich>=14.0.0",
12
10
  ]
13
11
  requires-python = ">=3.8"
14
12
  readme = "README.md"
@@ -36,7 +34,10 @@ classifiers = [
36
34
  ]
37
35
 
38
36
  [project.optional-dependencies]
39
- prettify = ["rich>=10.0.0"]
37
+ full = [
38
+ "rich>=10.0.0",
39
+ "prompt_toolkit>=3.0.0",
40
+ ]
40
41
 
41
42
  [project.urls]
42
43
  "Homepage" = "https://github.com/nazdridoy/ngpt"
@@ -134,27 +134,25 @@ wheels = [
134
134
 
135
135
  [[package]]
136
136
  name = "ngpt"
137
- version = "2.5.0"
137
+ version = "2.6.0"
138
138
  source = { editable = "." }
139
139
  dependencies = [
140
- { name = "prompt-toolkit" },
141
140
  { name = "requests" },
142
- { name = "rich" },
143
141
  ]
144
142
 
145
143
  [package.optional-dependencies]
146
- prettify = [
144
+ full = [
145
+ { name = "prompt-toolkit" },
147
146
  { name = "rich" },
148
147
  ]
149
148
 
150
149
  [package.metadata]
151
150
  requires-dist = [
152
- { name = "prompt-toolkit", specifier = ">=3.0.0" },
151
+ { name = "prompt-toolkit", marker = "extra == 'full'", specifier = ">=3.0.0" },
153
152
  { name = "requests", specifier = ">=2.31.0" },
154
- { name = "rich", specifier = ">=14.0.0" },
155
- { name = "rich", marker = "extra == 'prettify'", specifier = ">=10.0.0" },
153
+ { name = "rich", marker = "extra == 'full'", specifier = ">=10.0.0" },
156
154
  ]
157
- provides-extras = ["prettify"]
155
+ provides-extras = ["full"]
158
156
 
159
157
  [[package]]
160
158
  name = "prompt-toolkit"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes