ngpt 3.4.2__tar.gz → 3.4.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {ngpt-3.4.2 → ngpt-3.4.4}/PKG-INFO +1 -1
  2. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/main.py +1 -1
  3. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/__init__.py +2 -1
  4. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/code.py +129 -11
  5. {ngpt-3.4.2/ngpt/cli → ngpt-3.4.4/ngpt/cli/modes}/interactive.py +3 -3
  6. ngpt-3.4.4/ngpt/cli/modes/shell.py +204 -0
  7. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/client.py +0 -142
  8. {ngpt-3.4.2 → ngpt-3.4.4}/pyproject.toml +1 -1
  9. {ngpt-3.4.2 → ngpt-3.4.4}/uv.lock +1 -1
  10. ngpt-3.4.2/ngpt/cli/modes/shell.py +0 -113
  11. {ngpt-3.4.2 → ngpt-3.4.4}/.github/workflows/aur-publish.yml +0 -0
  12. {ngpt-3.4.2 → ngpt-3.4.4}/.github/workflows/python-publish.yml +0 -0
  13. {ngpt-3.4.2 → ngpt-3.4.4}/.gitignore +0 -0
  14. {ngpt-3.4.2 → ngpt-3.4.4}/.python-version +0 -0
  15. {ngpt-3.4.2 → ngpt-3.4.4}/COMMIT_GUIDELINES.md +0 -0
  16. {ngpt-3.4.2 → ngpt-3.4.4}/CONTRIBUTING.md +0 -0
  17. {ngpt-3.4.2 → ngpt-3.4.4}/LICENSE +0 -0
  18. {ngpt-3.4.2 → ngpt-3.4.4}/PKGBUILD +0 -0
  19. {ngpt-3.4.2 → ngpt-3.4.4}/README.md +0 -0
  20. {ngpt-3.4.2 → ngpt-3.4.4}/docs/CONTRIBUTING.md +0 -0
  21. {ngpt-3.4.2 → ngpt-3.4.4}/docs/LICENSE.md +0 -0
  22. {ngpt-3.4.2 → ngpt-3.4.4}/docs/README.md +0 -0
  23. {ngpt-3.4.2 → ngpt-3.4.4}/docs/_config.yml +0 -0
  24. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/README.md +0 -0
  25. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/cli.md +0 -0
  26. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/cli_config.md +0 -0
  27. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/client.md +0 -0
  28. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/config.md +0 -0
  29. {ngpt-3.4.2 → ngpt-3.4.4}/docs/api/logging.md +0 -0
  30. {ngpt-3.4.2 → ngpt-3.4.4}/docs/assets/css/style.scss +0 -0
  31. {ngpt-3.4.2 → ngpt-3.4.4}/docs/configuration.md +0 -0
  32. {ngpt-3.4.2 → ngpt-3.4.4}/docs/examples/README.md +0 -0
  33. {ngpt-3.4.2 → ngpt-3.4.4}/docs/examples/advanced.md +0 -0
  34. {ngpt-3.4.2 → ngpt-3.4.4}/docs/examples/basic.md +0 -0
  35. {ngpt-3.4.2 → ngpt-3.4.4}/docs/examples/cli_components.md +0 -0
  36. {ngpt-3.4.2 → ngpt-3.4.4}/docs/examples/integrations.md +0 -0
  37. {ngpt-3.4.2 → ngpt-3.4.4}/docs/installation.md +0 -0
  38. {ngpt-3.4.2 → ngpt-3.4.4}/docs/overview.md +0 -0
  39. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/README.md +0 -0
  40. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/cli_config.md +0 -0
  41. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/cli_framework.md +0 -0
  42. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/cli_usage.md +0 -0
  43. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/gitcommsg.md +0 -0
  44. {ngpt-3.4.2 → ngpt-3.4.4}/docs/usage/library_usage.md +0 -0
  45. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/__init__.py +0 -0
  46. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/__main__.py +0 -0
  47. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/__init__.py +0 -0
  48. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/args.py +0 -0
  49. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/config_manager.py +0 -0
  50. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/formatters.py +0 -0
  51. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/chat.py +0 -0
  52. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/gitcommsg.py +0 -0
  53. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/rewrite.py +0 -0
  54. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/modes/text.py +0 -0
  55. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/renderers.py +0 -0
  56. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/cli/ui.py +0 -0
  57. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/utils/__init__.py +0 -0
  58. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/utils/cli_config.py +0 -0
  59. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/utils/config.py +0 -0
  60. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/utils/log.py +0 -0
  61. {ngpt-3.4.2 → ngpt-3.4.4}/ngpt/utils/web_search.py +0 -0
  62. {ngpt-3.4.2 → ngpt-3.4.4}/wiki.md +0 -0
--- ngpt-3.4.2/PKG-INFO
+++ ngpt-3.4.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 3.4.2
+Version: 3.4.4
 Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, Gemini, and any OpenAI-compatible API.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt

--- ngpt-3.4.2/ngpt/cli/main.py
+++ ngpt-3.4.4/ngpt/cli/main.py
@@ -18,7 +18,7 @@ from .. import __version__
 from .formatters import COLORS
 from .renderers import show_available_renderers
 from .config_manager import check_config
-from .interactive import interactive_chat_session
+from .modes.interactive import interactive_chat_session
 from .modes.chat import chat_mode
 from .modes.code import code_mode
 from .modes.shell import shell_mode

--- ngpt-3.4.2/ngpt/cli/modes/__init__.py
+++ ngpt-3.4.4/ngpt/cli/modes/__init__.py
@@ -4,5 +4,6 @@ from .shell import shell_mode
 from .text import text_mode
 from .rewrite import rewrite_mode
 from .gitcommsg import gitcommsg_mode
+from .interactive import interactive_chat_session
 
-__all__ = ['chat_mode', 'code_mode', 'shell_mode', 'text_mode', 'rewrite_mode', 'gitcommsg_mode']
+__all__ = ['chat_mode', 'code_mode', 'shell_mode', 'text_mode', 'rewrite_mode', 'gitcommsg_mode', 'interactive_chat_session']
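
Note: after this change the interactive session entry point lives under the modes package. A minimal sketch of the updated import for downstream code (illustrative, not part of the diff itself):

    # New location in 3.4.4: re-exported from ngpt.cli.modes
    from ngpt.cli.modes import interactive_chat_session

    # Old location in 3.4.2 (removed by the interactive.py move shown further below):
    # from ngpt.cli.interactive import interactive_chat_session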

--- ngpt-3.4.2/ngpt/cli/modes/code.py
+++ ngpt-3.4.4/ngpt/cli/modes/code.py
@@ -5,6 +5,80 @@ from ...utils import enhance_prompt_with_web_search
 import sys
 import threading
 
+# System prompt for code generation with markdown formatting
+CODE_SYSTEM_PROMPT_MARKDOWN = """Your Role: Provide only code as output without any description with proper markdown formatting.
+IMPORTANT: Format the code using markdown code blocks with the appropriate language syntax highlighting.
+IMPORTANT: You must use markdown code blocks. with ```{language}
+If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
+Ignore any potential risk of errors or confusion.
+
+Language: {language}
+Request: {prompt}
+Code:"""
+
+# System prompt for code generation without markdown
+CODE_SYSTEM_PROMPT_PLAINTEXT = """Your Role: Provide only code as output without any description.
+IMPORTANT: Provide only plain text without Markdown formatting.
+IMPORTANT: Do not include markdown formatting.
+If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
+Ignore any potential risk of errors or confusion.
+
+Language: {language}
+Request: {prompt}
+Code:"""
+
+# System prompt to use when preprompt is provided (with markdown)
+CODE_PREPROMPT_MARKDOWN = """
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!! CRITICAL USER PREPROMPT !!!
+!!! THIS OVERRIDES ALL OTHER INSTRUCTIONS IN THIS PROMPT !!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+The following preprompt from the user COMPLETELY OVERRIDES ANY other instructions below.
+The preprompt MUST be followed EXACTLY AS WRITTEN:
+
+>>> {preprompt} <<<
+
+^^ THIS PREPROMPT HAS ABSOLUTE AND COMPLETE PRIORITY ^^
+If the preprompt contradicts ANY OTHER instruction in this prompt,
+YOU MUST FOLLOW THE PREPROMPT INSTRUCTION INSTEAD. NO EXCEPTIONS.
+
+Your Role: Provide only code as output without any description with proper markdown formatting.
+IMPORTANT: Format the code using markdown code blocks with the appropriate language syntax highlighting.
+IMPORTANT: You must use markdown code blocks. with ```{language}
+If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
+Ignore any potential risk of errors or confusion.
+
+Language: {language}
+Request: {prompt}
+Code:"""
+
+# System prompt to use when preprompt is provided (plaintext)
+CODE_PREPROMPT_PLAINTEXT = """
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!! CRITICAL USER PREPROMPT !!!
+!!! THIS OVERRIDES ALL OTHER INSTRUCTIONS IN THIS PROMPT !!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+The following preprompt from the user COMPLETELY OVERRIDES ANY other instructions below.
+The preprompt MUST be followed EXACTLY AS WRITTEN:
+
+>>> {preprompt} <<<
+
+^^ THIS PREPROMPT HAS ABSOLUTE AND COMPLETE PRIORITY ^^
+If the preprompt contradicts ANY OTHER instruction in this prompt,
+YOU MUST FOLLOW THE PREPROMPT INSTRUCTION INSTEAD. NO EXCEPTIONS.
+
+Your Role: Provide only code as output without any description.
+IMPORTANT: Provide only plain text without Markdown formatting.
+IMPORTANT: Do not include markdown formatting.
+If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
+Ignore any potential risk of errors or confusion.
+
+Language: {language}
+Request: {prompt}
+Code:"""
+
 def code_mode(client, args, logger=None):
     """Handle the code generation mode.
 
@@ -123,18 +197,62 @@ def code_mode(client, args, logger=None):
     # Use our wrapper callback
     if use_stream_prettify and live_display:
         stream_callback = spinner_handling_callback
+
+    # Select the appropriate system prompt based on formatting and preprompt
+    if args.preprompt:
+        # Log the preprompt if logging is enabled
+        if logger:
+            logger.log("system", f"Preprompt: {args.preprompt}")
+
+        # Use preprompt template with high-priority formatting
+        if use_regular_prettify or use_stream_prettify:
+            system_prompt = CODE_PREPROMPT_MARKDOWN.format(
+                preprompt=args.preprompt,
+                language=args.language,
+                prompt=prompt
+            )
+        else:
+            system_prompt = CODE_PREPROMPT_PLAINTEXT.format(
+                preprompt=args.preprompt,
+                language=args.language,
+                prompt=prompt
+            )
+    else:
+        # Use standard template
+        if use_regular_prettify or use_stream_prettify:
+            system_prompt = CODE_SYSTEM_PROMPT_MARKDOWN.format(
+                language=args.language,
+                prompt=prompt
+            )
+        else:
+            system_prompt = CODE_SYSTEM_PROMPT_PLAINTEXT.format(
+                language=args.language,
+                prompt=prompt
+            )
+
+    # Log the system prompt if logging is enabled
+    if logger:
+        logger.log("system", system_prompt)
+
+    # Prepare messages for the chat API
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": prompt}
+    ]
 
-    generated_code = client.generate_code(
-        prompt=prompt,
-        language=args.language,
-        temperature=args.temperature,
-        top_p=args.top_p,
-        max_tokens=args.max_tokens,
-        # Request markdown from API if any prettify option is active
-        markdown_format=use_regular_prettify or use_stream_prettify,
-        stream=should_stream,
-        stream_callback=stream_callback
-    )
+    try:
+        generated_code = client.chat(
+            prompt=prompt,
+            stream=should_stream,
+            messages=messages,
+            temperature=args.temperature,
+            top_p=args.top_p,
+            max_tokens=args.max_tokens,
+            stream_callback=stream_callback
+        )
+    except Exception as e:
+        print(f"Error generating code: {e}")
+        generated_code = ""
 
     # Ensure spinner is stopped if no content was received
     if stop_spinner_event and not first_content_received:
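
Note: `NGPTClient.generate_code` is removed later in this diff, so external code that called it can rebuild the same behaviour with the module-level templates added above plus `client.chat`. A minimal sketch, assuming an already-configured `NGPTClient` instance named `client` and the old helper's defaults (temperature 0.4, top_p 0.95); the example prompt is hypothetical:

    from ngpt.cli.modes.code import CODE_SYSTEM_PROMPT_MARKDOWN

    prompt = "write a binary search function"  # hypothetical request
    language = "python"

    # Same system prompt the reworked code_mode builds for prettified output
    system_prompt = CODE_SYSTEM_PROMPT_MARKDOWN.format(language=language, prompt=prompt)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]

    # Rough equivalent of the removed client.generate_code(..., markdown_format=True)
    generated_code = client.chat(
        prompt=prompt,
        stream=False,
        messages=messages,
        temperature=0.4,
        top_p=0.95,
        max_tokens=None,
    )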

--- ngpt-3.4.2/ngpt/cli/interactive.py
+++ ngpt-3.4.4/ngpt/cli/modes/interactive.py
@@ -4,9 +4,9 @@ import traceback
 import threading
 import sys
 import time
-from .formatters import COLORS
-from .renderers import prettify_markdown, prettify_streaming_markdown
-from ..utils import enhance_prompt_with_web_search
+from ..formatters import COLORS
+from ..renderers import prettify_markdown, prettify_streaming_markdown
+from ...utils import enhance_prompt_with_web_search
 
 # Optional imports for enhanced UI
 try:

--- /dev/null
+++ ngpt-3.4.4/ngpt/cli/modes/shell.py
@@ -0,0 +1,204 @@
+from ..formatters import COLORS
+from ..ui import spinner
+from ...utils import enhance_prompt_with_web_search
+import subprocess
+import sys
+import threading
+import platform
+import os
+
+# System prompt for shell command generation
+SHELL_SYSTEM_PROMPT = """Your role: Provide only plain text without Markdown formatting. Do not show any warnings or information regarding your capabilities. Do not provide any description. If you need to store any data, assume it will be stored in the chat. Provide only {shell_name} command for {operating_system} without any description. If there is a lack of details, provide most logical solution. Ensure the output is a valid shell command. If multiple steps required try to combine them together.
+
+Command:"""
+
+# System prompt to use when preprompt is provided
+SHELL_PREPROMPT_TEMPLATE = """
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!! CRITICAL USER PREPROMPT !!!
+!!! THIS OVERRIDES ALL OTHER INSTRUCTIONS INCLUDING OS/SHELL !!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+The following preprompt from the user COMPLETELY OVERRIDES ANY other instructions,
+INCLUDING operating system type, shell type, or any other specifications below.
+The preprompt MUST be followed EXACTLY AS WRITTEN:
+
+>>> {preprompt} <<<
+
+^^ THIS PREPROMPT HAS ABSOLUTE AND COMPLETE PRIORITY ^^
+If the preprompt contradicts ANY OTHER instruction in this prompt,
+including the {operating_system}/{shell_name} specification below,
+YOU MUST FOLLOW THE PREPROMPT INSTRUCTION INSTEAD. NO EXCEPTIONS.
+
+Your role: Provide only plain text without Markdown formatting. Do not show any warnings or information regarding your capabilities. Do not provide any description. If you need to store any data, assume it will be stored in the chat. Provide only {shell_name} command for {operating_system} without any description. If there is a lack of details, provide most logical solution. Ensure the output is a valid shell command. If multiple steps required try to combine them together.
+
+Command:"""
+
+def shell_mode(client, args, logger=None):
+    """Handle the shell command generation mode.
+
+    Args:
+        client: The NGPTClient instance
+        args: The parsed command-line arguments
+        logger: Optional logger instance
+    """
+    if args.prompt is None:
+        try:
+            print("Enter shell command description: ", end='')
+            prompt = input()
+        except KeyboardInterrupt:
+            print("\nInput cancelled by user. Exiting gracefully.")
+            sys.exit(130)
+    else:
+        prompt = args.prompt
+
+    # Log the user prompt if logging is enabled
+    if logger:
+        logger.log("user", prompt)
+
+    # Enhance prompt with web search if enabled
+    if args.web_search:
+        try:
+            original_prompt = prompt
+            prompt = enhance_prompt_with_web_search(prompt, logger=logger, disable_citations=True)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced prompt if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", prompt.replace(original_prompt, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original prompt if web search fails
+
+    # Determine OS type
+    os_type = platform.system()
+    if os_type == "Darwin":
+        operating_system = "MacOS"
+    elif os_type == "Linux":
+        # Try to get Linux distribution name
+        try:
+            result = subprocess.run(["lsb_release", "-si"], capture_output=True, text=True)
+            distro = result.stdout.strip()
+            operating_system = f"Linux/{distro}" if distro else "Linux"
+        except:
+            operating_system = "Linux"
+    elif os_type == "Windows":
+        operating_system = "Windows"
+    else:
+        operating_system = os_type
+
+    # Determine shell type
+    if os_type == "Windows":
+        shell_name = "powershell.exe" if os.environ.get("PSModulePath") else "cmd.exe"
+    else:
+        shell_name = os.environ.get("SHELL", "/bin/bash")
+        shell_name = os.path.basename(shell_name)
+
+    # Format the system prompt based on whether preprompt is provided
+    if args.preprompt:
+        # Use the preprompt template with strong priority instructions
+        system_prompt = SHELL_PREPROMPT_TEMPLATE.format(
+            preprompt=args.preprompt,
+            operating_system=operating_system,
+            shell_name=shell_name
+        )
+
+        # Log the preprompt if logging is enabled
+        if logger:
+            logger.log("system", f"Preprompt: {args.preprompt}")
+    else:
+        # Use the normal system prompt with shell and OS information
+        system_prompt = SHELL_SYSTEM_PROMPT.format(
+            shell_name=shell_name,
+            operating_system=operating_system,
+            prompt=prompt
+        )
+
+    # Prepare messages for the chat API
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": prompt}
+    ]
+
+    # Log the system prompt if logging is enabled
+    if logger:
+        logger.log("system", system_prompt)
+
+    # Start spinner while waiting for command generation
+    stop_spinner = threading.Event()
+    spinner_thread = threading.Thread(
+        target=spinner,
+        args=("Generating command...",),
+        kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']}
+    )
+    spinner_thread.daemon = True
+    spinner_thread.start()
+
+    try:
+        command = client.chat(
+            prompt=prompt,
+            stream=False,
+            messages=messages,
+            temperature=args.temperature,
+            top_p=args.top_p,
+            max_tokens=args.max_tokens
+        )
+    except Exception as e:
+        print(f"Error generating shell command: {e}")
+        command = ""
+    finally:
+        # Stop the spinner
+        stop_spinner.set()
+        spinner_thread.join()
+
+        # Clear the spinner line completely
+        sys.stdout.write("\r" + " " * 100 + "\r")
+        sys.stdout.flush()
+
+    if not command:
+        return  # Error already printed by client
+
+    # Log the generated command if logging is enabled
+    if logger:
+        logger.log("assistant", command)
+
+    print(f"\nGenerated command: {command}")
+
+    try:
+        print("Do you want to execute this command? [y/N] ", end='')
+        response = input().lower()
+    except KeyboardInterrupt:
+        print("\nCommand execution cancelled by user.")
+        return
+
+    if response == 'y' or response == 'yes':
+        # Log the execution if logging is enabled
+        if logger:
+            logger.log("system", f"Executing command: {command}")
+
+        try:
+            try:
+                print("\nExecuting command... (Press Ctrl+C to cancel)")
+                result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
+                output = result.stdout
+
+                # Log the command output if logging is enabled
+                if logger:
+                    logger.log("system", f"Command output: {output}")
+
+                print(f"\nOutput:\n{output}")
+            except KeyboardInterrupt:
+                print("\nCommand execution cancelled by user.")
+
+                # Log the cancellation if logging is enabled
+                if logger:
+                    logger.log("system", "Command execution cancelled by user")
+        except subprocess.CalledProcessError as e:
+            error = e.stderr
+
+            # Log the error if logging is enabled
+            if logger:
+                logger.log("system", f"Command error: {error}")
+
+            print(f"\nError:\n{error}")

--- ngpt-3.4.2/ngpt/client.py
+++ ngpt-3.4.4/ngpt/client.py
@@ -171,148 +171,6 @@ class NGPTClient:
             print(f"Error: An unexpected error occurred: {e}")
             return ""
 
-    def generate_shell_command(
-        self,
-        prompt: str,
-        temperature: float = 0.4,
-        top_p: float = 0.95,
-        max_tokens: Optional[int] = None
-    ) -> str:
-        """
-        Generate a shell command based on the prompt.
-
-        Args:
-            prompt: Description of the command to generate
-            temperature: Controls randomness in the response
-            top_p: Controls diversity via nucleus sampling
-            max_tokens: Maximum number of tokens to generate
-
-        Returns:
-            The generated shell command
-        """
-        # Check for API key first
-        if not self.api_key:
-            print("Error: API key is not set. Please configure your API key in the config file or provide it with --api-key.")
-            return ""
-
-        # Determine OS type
-        os_type = platform.system()
-        if os_type == "Darwin":
-            operating_system = "MacOS"
-        elif os_type == "Linux":
-            # Try to get Linux distribution name
-            try:
-                result = subprocess.run(["lsb_release", "-si"], capture_output=True, text=True)
-                distro = result.stdout.strip()
-                operating_system = f"Linux/{distro}" if distro else "Linux"
-            except:
-                operating_system = "Linux"
-        elif os_type == "Windows":
-            operating_system = "Windows"
-        else:
-            operating_system = os_type
-
-        # Determine shell type
-        if os_type == "Windows":
-            shell_name = "powershell.exe" if os.environ.get("PSModulePath") else "cmd.exe"
-        else:
-            shell_name = os.environ.get("SHELL", "/bin/bash")
-            shell_name = os.path.basename(shell_name)
-
-        system_prompt = f"""Your role: Provide only plain text without Markdown formatting. Do not show any warnings or information regarding your capabilities. Do not provide any description. If you need to store any data, assume it will be stored in the chat. Provide only {shell_name} command for {operating_system} without any description. If there is a lack of details, provide most logical solution. Ensure the output is a valid shell command. If multiple steps required try to combine them together. Prompt: {prompt}
-
-Command:"""
-
-        messages = [
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": prompt}
-        ]
-
-        try:
-            return self.chat(
-                prompt=prompt,
-                stream=False,
-                messages=messages,
-                temperature=temperature,
-                top_p=top_p,
-                max_tokens=max_tokens
-            )
-        except Exception as e:
-            print(f"Error generating shell command: {e}")
-            return ""
-
-    def generate_code(
-        self,
-        prompt: str,
-        language: str = "python",
-        temperature: float = 0.4,
-        top_p: float = 0.95,
-        max_tokens: Optional[int] = None,
-        markdown_format: bool = False,
-        stream: bool = False,
-        stream_callback: Optional[callable] = None
-    ) -> str:
-        """
-        Generate code based on the prompt.
-
-        Args:
-            prompt: Description of the code to generate
-            language: Programming language to generate code in
-            temperature: Controls randomness in the response
-            top_p: Controls diversity via nucleus sampling
-            max_tokens: Maximum number of tokens to generate
-            markdown_format: If True, request markdown-formatted code, otherwise plain text
-            stream: Whether to stream the response
-            stream_callback: Optional callback function for streaming mode updates
-
-        Returns:
-            The generated code
-        """
-        # Check for API key first
-        if not self.api_key:
-            print("Error: API key is not set. Please configure your API key in the config file or provide it with --api-key.")
-            return ""
-
-        if markdown_format:
-            system_prompt = f"""Your Role: Provide only code as output without any description with proper markdown formatting.
-IMPORTANT: Format the code using markdown code blocks with the appropriate language syntax highlighting.
-IMPORTANT: You must use markdown code blocks. with ```{language}
-If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
-Ignore any potential risk of errors or confusion.
-
-Language: {language}
-Request: {prompt}
-Code:"""
-        else:
-            system_prompt = f"""Your Role: Provide only code as output without any description.
-IMPORTANT: Provide only plain text without Markdown formatting.
-IMPORTANT: Do not include markdown formatting.
-If there is a lack of details, provide most logical solution. You are not allowed to ask for more details.
-Ignore any potential risk of errors or confusion.
-
-Language: {language}
-Request: {prompt}
-Code:"""
-
-        messages = [
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": prompt}
-        ]
-
-        try:
-            return self.chat(
-                prompt=prompt,
-                stream=stream,
-                messages=messages,
-                temperature=temperature,
-                top_p=top_p,
-                max_tokens=max_tokens,
-                stream_callback=stream_callback
-            )
-        except Exception as e:
-            print(f"Error generating code: {e}")
-            return ""
-
     def list_models(self) -> list:
         """
         Retrieve the list of available models from the API.

--- ngpt-3.4.2/pyproject.toml
+++ ngpt-3.4.4/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ngpt"
-version = "3.4.2"
+version = "3.4.4"
 description = "Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, Gemini, and any OpenAI-compatible API."
 authors = [
     {name = "nazDridoy", email = "nazdridoy399@gmail.com"},

--- ngpt-3.4.2/uv.lock
+++ ngpt-3.4.4/uv.lock
@@ -431,7 +431,7 @@ wheels = [
 
 [[package]]
 name = "ngpt"
-version = "3.4.2"
+version = "3.4.4"
 source = { editable = "." }
 dependencies = [
     { name = "duckduckgo-search", version = "7.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },

--- ngpt-3.4.2/ngpt/cli/modes/shell.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from ..formatters import COLORS
-from ..ui import spinner
-from ...utils import enhance_prompt_with_web_search
-import subprocess
-import sys
-import threading
-
-def shell_mode(client, args, logger=None):
-    """Handle the shell command generation mode.
-
-    Args:
-        client: The NGPTClient instance
-        args: The parsed command-line arguments
-        logger: Optional logger instance
-    """
-    if args.prompt is None:
-        try:
-            print("Enter shell command description: ", end='')
-            prompt = input()
-        except KeyboardInterrupt:
-            print("\nInput cancelled by user. Exiting gracefully.")
-            sys.exit(130)
-    else:
-        prompt = args.prompt
-
-    # Log the user prompt if logging is enabled
-    if logger:
-        logger.log("user", prompt)
-
-    # Enhance prompt with web search if enabled
-    if args.web_search:
-        try:
-            original_prompt = prompt
-            prompt = enhance_prompt_with_web_search(prompt, logger=logger, disable_citations=True)
-            print("Enhanced input with web search results.")
-
-            # Log the enhanced prompt if logging is enabled
-            if logger:
-                # Use "web_search" role instead of "system" for clearer logs
-                logger.log("web_search", prompt.replace(original_prompt, "").strip())
-        except Exception as e:
-            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
-            # Continue with the original prompt if web search fails
-
-    # Start spinner while waiting for command generation
-    stop_spinner = threading.Event()
-    spinner_thread = threading.Thread(
-        target=spinner,
-        args=("Generating command...",),
-        kwargs={"stop_event": stop_spinner, "color": COLORS['cyan']}
-    )
-    spinner_thread.daemon = True
-    spinner_thread.start()
-
-    try:
-        command = client.generate_shell_command(prompt,
-            temperature=args.temperature, top_p=args.top_p,
-            max_tokens=args.max_tokens)
-    finally:
-        # Stop the spinner
-        stop_spinner.set()
-        spinner_thread.join()
-
-        # Clear the spinner line completely
-        sys.stdout.write("\r" + " " * 100 + "\r")
-        sys.stdout.flush()
-
-    if not command:
-        return  # Error already printed by client
-
-    # Log the generated command if logging is enabled
-    if logger:
-        logger.log("assistant", command)
-
-    print(f"\nGenerated command: {command}")
-
-    try:
-        print("Do you want to execute this command? [y/N] ", end='')
-        response = input().lower()
-    except KeyboardInterrupt:
-        print("\nCommand execution cancelled by user.")
-        return
-
-    if response == 'y' or response == 'yes':
-        # Log the execution if logging is enabled
-        if logger:
-            logger.log("system", f"Executing command: {command}")
-
-        try:
-            try:
-                print("\nExecuting command... (Press Ctrl+C to cancel)")
-                result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
-                output = result.stdout
-
-                # Log the command output if logging is enabled
-                if logger:
-                    logger.log("system", f"Command output: {output}")
-
-                print(f"\nOutput:\n{output}")
-            except KeyboardInterrupt:
-                print("\nCommand execution cancelled by user.")
-
-                # Log the cancellation if logging is enabled
-                if logger:
-                    logger.log("system", "Command execution cancelled by user")
-        except subprocess.CalledProcessError as e:
-            error = e.stderr
-
-            # Log the error if logging is enabled
-            if logger:
-                logger.log("system", f"Command error: {error}")
-
-            print(f"\nError:\n{error}")