ngpt 3.2.0__tar.gz → 3.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {ngpt-3.2.0 → ngpt-3.3.0}/PKG-INFO +5 -3
  2. {ngpt-3.2.0 → ngpt-3.3.0}/README.md +2 -2
  3. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/args.py +1 -1
  4. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/interactive.py +28 -2
  5. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/chat.py +17 -1
  6. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/code.py +17 -2
  7. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/rewrite.py +16 -1
  8. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/shell.py +19 -3
  9. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/text.py +17 -1
  10. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/client.py +0 -12
  11. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/utils/__init__.py +7 -1
  12. ngpt-3.3.0/ngpt/utils/web_search.py +270 -0
  13. {ngpt-3.2.0 → ngpt-3.3.0}/pyproject.toml +3 -1
  14. ngpt-3.3.0/uv.lock +796 -0
  15. ngpt-3.2.0/uv.lock +0 -251
  16. {ngpt-3.2.0 → ngpt-3.3.0}/.github/workflows/aur-publish.yml +0 -0
  17. {ngpt-3.2.0 → ngpt-3.3.0}/.github/workflows/python-publish.yml +0 -0
  18. {ngpt-3.2.0 → ngpt-3.3.0}/.gitignore +0 -0
  19. {ngpt-3.2.0 → ngpt-3.3.0}/.python-version +0 -0
  20. {ngpt-3.2.0 → ngpt-3.3.0}/COMMIT_GUIDELINES.md +0 -0
  21. {ngpt-3.2.0 → ngpt-3.3.0}/CONTRIBUTING.md +0 -0
  22. {ngpt-3.2.0 → ngpt-3.3.0}/LICENSE +0 -0
  23. {ngpt-3.2.0 → ngpt-3.3.0}/PKGBUILD +0 -0
  24. {ngpt-3.2.0 → ngpt-3.3.0}/docs/CONTRIBUTING.md +0 -0
  25. {ngpt-3.2.0 → ngpt-3.3.0}/docs/LICENSE.md +0 -0
  26. {ngpt-3.2.0 → ngpt-3.3.0}/docs/README.md +0 -0
  27. {ngpt-3.2.0 → ngpt-3.3.0}/docs/_config.yml +0 -0
  28. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/README.md +0 -0
  29. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/cli.md +0 -0
  30. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/cli_config.md +0 -0
  31. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/client.md +0 -0
  32. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/config.md +0 -0
  33. {ngpt-3.2.0 → ngpt-3.3.0}/docs/api/logging.md +0 -0
  34. {ngpt-3.2.0 → ngpt-3.3.0}/docs/assets/css/style.scss +0 -0
  35. {ngpt-3.2.0 → ngpt-3.3.0}/docs/configuration.md +0 -0
  36. {ngpt-3.2.0 → ngpt-3.3.0}/docs/examples/README.md +0 -0
  37. {ngpt-3.2.0 → ngpt-3.3.0}/docs/examples/advanced.md +0 -0
  38. {ngpt-3.2.0 → ngpt-3.3.0}/docs/examples/basic.md +0 -0
  39. {ngpt-3.2.0 → ngpt-3.3.0}/docs/examples/cli_components.md +0 -0
  40. {ngpt-3.2.0 → ngpt-3.3.0}/docs/examples/integrations.md +0 -0
  41. {ngpt-3.2.0 → ngpt-3.3.0}/docs/installation.md +0 -0
  42. {ngpt-3.2.0 → ngpt-3.3.0}/docs/overview.md +0 -0
  43. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/README.md +0 -0
  44. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/cli_config.md +0 -0
  45. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/cli_framework.md +0 -0
  46. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/cli_usage.md +0 -0
  47. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/gitcommsg.md +0 -0
  48. {ngpt-3.2.0 → ngpt-3.3.0}/docs/usage/library_usage.md +0 -0
  49. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/__init__.py +0 -0
  50. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/__main__.py +0 -0
  51. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/__init__.py +0 -0
  52. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/config_manager.py +0 -0
  53. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/formatters.py +0 -0
  54. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/main.py +0 -0
  55. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/__init__.py +0 -0
  56. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/gitcommsg.py +0 -0
  57. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/renderers.py +0 -0
  58. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/ui.py +0 -0
  59. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/utils/cli_config.py +0 -0
  60. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/utils/config.py +0 -0
  61. {ngpt-3.2.0 → ngpt-3.3.0}/ngpt/utils/log.py +0 -0
  62. {ngpt-3.2.0 → ngpt-3.3.0}/wiki.md +0 -0
{ngpt-3.2.0 → ngpt-3.3.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 3.2.0
+Version: 3.3.0
 Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, Gemini, and any OpenAI-compatible API.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt
@@ -28,10 +28,12 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: Utilities
 Requires-Python: >=3.8
+Requires-Dist: duckduckgo-search>=3.0.0
 Requires-Dist: prompt-toolkit>=3.0.0
 Requires-Dist: pyperclip>=1.8.0
 Requires-Dist: requests>=2.31.0
 Requires-Dist: rich>=10.0.0
+Requires-Dist: trafilatura>=1.6.0
 Description-Content-Type: text/markdown
 
 # nGPT
@@ -63,7 +65,7 @@ Description-Content-Type: text/markdown
 - 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, Gemini, and any compatible endpoint
 - 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
 - 📊 **Streaming Responses**: Real-time output for better user experience
-- 🔍 **Web Search**: Integrated with compatible API endpoints
+- 🔍 **Web Search**: Enhance any model with contextual information from the web
 - 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
 - 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
 - ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
@@ -271,7 +273,7 @@ ngpt --list-models --provider Gemini
 # With custom options
 ngpt --api-key your-key --base-url http://your-endpoint --model your-model "Hello"
 
-# Enable web search (if your API endpoint supports it)
+# Enable web search capability to enhance prompts with web information
 ngpt --web-search "What's the latest news about AI?"
 
 # Generate and execute shell commands (using -s or --shell flag)

{ngpt-3.2.0 → ngpt-3.3.0}/README.md

@@ -27,7 +27,7 @@
 - 🔄 **API Flexibility**: Works with OpenAI, Ollama, Groq, Claude, Gemini, and any compatible endpoint
 - 💬 **Interactive Chat**: Continuous conversation with memory in modern UI
 - 📊 **Streaming Responses**: Real-time output for better user experience
-- 🔍 **Web Search**: Integrated with compatible API endpoints
+- 🔍 **Web Search**: Enhance any model with contextual information from the web
 - 📥 **Stdin Processing**: Process piped content by using `{}` placeholder in prompts
 - 🎨 **Markdown Rendering**: Beautiful formatting of markdown and code with syntax highlighting
 - ⚡ **Real-time Markdown**: Stream responses with live updating syntax highlighting and formatting
@@ -235,7 +235,7 @@ ngpt --list-models --provider Gemini
 # With custom options
 ngpt --api-key your-key --base-url http://your-endpoint --model your-model "Hello"
 
-# Enable web search (if your API endpoint supports it)
+# Enable web search capability to enhance prompts with web information
 ngpt --web-search "What's the latest news about AI?"
 
 # Generate and execute shell commands (using -s or --shell flag)

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/args.py

@@ -68,7 +68,7 @@ def setup_argument_parser():
     global_group.add_argument('--model',
                               help='Model to use')
     global_group.add_argument('--web-search', action='store_true',
-                              help='Enable web search capability (Note: Your API endpoint must support this feature)')
+                              help='Enable web search capability using DuckDuckGo to enhance prompts with relevant information')
     global_group.add_argument('--temperature', type=float, default=0.7,
                               help='Set temperature (controls randomness, default: 0.7)')
     global_group.add_argument('--top_p', type=float, default=1.0,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/interactive.py

@@ -6,6 +6,7 @@ import sys
 import time
 from .formatters import COLORS
 from .renderers import prettify_markdown, prettify_streaming_markdown
+from ..utils import enhance_prompt_with_web_search
 
 # Optional imports for enhanced UI
 try:
@@ -64,6 +65,10 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
     if logger:
         print(f"{COLORS['green']}Logging conversation to: {logger.get_log_path()}{COLORS['reset']}")
 
+    # Display a note about web search if enabled
+    if web_search:
+        print(f"{COLORS['green']}Web search capability is enabled.{COLORS['reset']}")
+
     # Display a note about markdown rendering only once at the beginning
     if prettify and not no_stream and not stream_prettify:
         print(f"{COLORS['yellow']}Note: Using standard markdown rendering (--prettify). For streaming markdown rendering, use --stream-prettify instead.{COLORS['reset']}")
@@ -188,6 +193,28 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
             # Log user message if logging is enabled
             if logger:
                 logger.log("user", user_input)
+
+            # Enhance prompt with web search if enabled
+            enhanced_prompt = user_input
+            if web_search:
+                try:
+                    print(f"{COLORS['cyan']}Searching the web...{COLORS['reset']}")
+                    enhanced_prompt = enhance_prompt_with_web_search(user_input, logger=logger)
+                    print(f"{COLORS['green']}Enhanced input with web search results.{COLORS['reset']}")
+
+                    # Update the user message in conversation with enhanced prompt
+                    for i in range(len(conversation) - 1, -1, -1):
+                        if conversation[i]["role"] == "user" and conversation[i]["content"] == user_input:
+                            conversation[i]["content"] = enhanced_prompt
+                            break
+
+                    # Log the enhanced prompt if logging is enabled
+                    if logger:
+                        # Use "web_search" role instead of "system" for clearer logs
+                        logger.log("web_search", enhanced_prompt.replace(user_input, "").strip())
+                except Exception as e:
+                    print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+                    # Continue with the original prompt if web search fails
 
             # Print assistant indicator with formatting - but only if we're not going to show a rich formatted box
             # With Rich prettify, no header should be printed as the Rich panel already includes it
@@ -271,10 +298,9 @@ def interactive_chat_session(client, web_search=False, no_stream=False, temperat
 
             # Get AI response with conversation history
             response = client.chat(
-                prompt=user_input,
+                prompt=enhanced_prompt,
                 messages=conversation,
                 stream=should_stream,
-                web_search=web_search,
                 temperature=temperature,
                 top_p=top_p,
                 max_tokens=max_tokens,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/chat.py

@@ -1,6 +1,7 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown
 from ..ui import spinner
+from ...utils import enhance_prompt_with_web_search
 import sys
 import threading
 
@@ -53,6 +54,21 @@ def chat_mode(client, args, logger=None):
     # Log the user message if logging is enabled
     if logger:
         logger.log("user", prompt)
+
+    # Enhance prompt with web search if enabled
+    if args.web_search:
+        try:
+            original_prompt = prompt
+            prompt = enhance_prompt_with_web_search(prompt, logger=logger)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced prompt if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", prompt.replace(original_prompt, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original prompt if web search fails
 
     # Create messages array with preprompt if available
     messages = None
@@ -123,7 +139,7 @@ def chat_mode(client, args, logger=None):
     if args.stream_prettify and live_display:
         stream_callback = spinner_handling_callback
 
-    response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
+    response = client.chat(prompt, stream=should_stream,
                            temperature=args.temperature, top_p=args.top_p,
                            max_tokens=args.max_tokens, messages=messages,
                            markdown_format=args.prettify or args.stream_prettify,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/code.py

@@ -1,6 +1,7 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown, has_markdown_renderer, show_available_renderers
 from ..ui import spinner
+from ...utils import enhance_prompt_with_web_search
 import sys
 import threading
 
@@ -25,6 +26,21 @@ def code_mode(client, args, logger=None):
     # Log the user prompt if logging is enabled
     if logger:
         logger.log("user", prompt)
+
+    # Enhance prompt with web search if enabled
+    if args.web_search:
+        try:
+            original_prompt = prompt
+            prompt = enhance_prompt_with_web_search(prompt, logger=logger)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced prompt if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", prompt.replace(original_prompt, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original prompt if web search fails
 
     # Setup for streaming and prettify logic
     stream_callback = None
@@ -110,8 +126,7 @@ def code_mode(client, args, logger=None):
 
     generated_code = client.generate_code(
         prompt=prompt,
-        language=args.language,
-        web_search=args.web_search,
+        language=args.language,
         temperature=args.temperature,
         top_p=args.top_p,
         max_tokens=args.max_tokens,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/rewrite.py

@@ -5,6 +5,7 @@ import time
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown
 from ..ui import get_multiline_input, spinner
+from ...utils import enhance_prompt_with_web_search
 
 # System prompt for rewriting text
 REWRITE_SYSTEM_PROMPT = """You are an expert text editor and rewriter. Your task is to rewrite the user's text to improve readability and flow while carefully preserving the original meaning, tone, and style.
@@ -125,6 +126,21 @@ def rewrite_mode(client, args, logger=None):
         print(f"{COLORS['yellow']}Error: Empty input. Please provide text to rewrite.{COLORS['reset']}")
         return
 
+    # Enhance input with web search if enabled
+    if args.web_search:
+        try:
+            original_text = input_text
+            input_text = enhance_prompt_with_web_search(input_text, logger=logger)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced input if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", input_text.replace(original_text, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance input with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original input if web search fails
+
     # Set up messages array with system prompt and user content
     messages = [
         {"role": "system", "content": REWRITE_SYSTEM_PROMPT},
@@ -195,7 +211,6 @@ def rewrite_mode(client, args, logger=None):
     response = client.chat(
         prompt=None,  # Not used when messages are provided
         stream=should_stream,
-        web_search=args.web_search,
         temperature=args.temperature,
         top_p=args.top_p,
         max_tokens=args.max_tokens,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/shell.py

@@ -1,5 +1,6 @@
 from ..formatters import COLORS
 from ..ui import spinner
+from ...utils import enhance_prompt_with_web_search
 import subprocess
 import sys
 import threading
@@ -26,6 +27,21 @@ def shell_mode(client, args, logger=None):
     if logger:
         logger.log("user", prompt)
 
+    # Enhance prompt with web search if enabled
+    if args.web_search:
+        try:
+            original_prompt = prompt
+            prompt = enhance_prompt_with_web_search(prompt, logger=logger)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced prompt if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", prompt.replace(original_prompt, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original prompt if web search fails
+
     # Start spinner while waiting for command generation
     stop_spinner = threading.Event()
     spinner_thread = threading.Thread(
@@ -37,9 +53,9 @@ def shell_mode(client, args, logger=None):
     spinner_thread.start()
 
     try:
-        command = client.generate_shell_command(prompt, web_search=args.web_search,
-                                                temperature=args.temperature, top_p=args.top_p,
-                                                max_tokens=args.max_tokens)
+        command = client.generate_shell_command(prompt,
+                                                temperature=args.temperature, top_p=args.top_p,
+                                                max_tokens=args.max_tokens)
     finally:
         # Stop the spinner
         stop_spinner.set()

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/cli/modes/text.py

@@ -1,6 +1,7 @@
 from ..formatters import COLORS
 from ..renderers import prettify_markdown, prettify_streaming_markdown
 from ..ui import get_multiline_input, spinner
+from ...utils import enhance_prompt_with_web_search
 import threading
 import sys
 
@@ -25,6 +26,21 @@ def text_mode(client, args, logger=None):
     if logger:
         logger.log("user", prompt)
 
+    # Enhance prompt with web search if enabled
+    if args.web_search:
+        try:
+            original_prompt = prompt
+            prompt = enhance_prompt_with_web_search(prompt, logger=logger)
+            print("Enhanced input with web search results.")
+
+            # Log the enhanced prompt if logging is enabled
+            if logger:
+                # Use "web_search" role instead of "system" for clearer logs
+                logger.log("web_search", prompt.replace(original_prompt, "").strip())
+        except Exception as e:
+            print(f"{COLORS['yellow']}Warning: Failed to enhance prompt with web search: {str(e)}{COLORS['reset']}")
+            # Continue with the original prompt if web search fails
+
     # Create messages array with preprompt if available
     messages = None
     if args.preprompt:
@@ -94,7 +110,7 @@ def text_mode(client, args, logger=None):
     if args.stream_prettify and live_display:
         stream_callback = spinner_handling_callback
 
-    response = client.chat(prompt, stream=should_stream, web_search=args.web_search,
+    response = client.chat(prompt, stream=should_stream,
                            temperature=args.temperature, top_p=args.top_p,
                            max_tokens=args.max_tokens, messages=messages,
                            markdown_format=args.prettify or args.stream_prettify,

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/client.py

@@ -32,7 +32,6 @@ class NGPTClient:
         max_tokens: Optional[int] = None,
         top_p: float = 1.0,
         messages: Optional[List[Dict[str, str]]] = None,
-        web_search: bool = False,
         markdown_format: bool = False,
         stream_callback: Optional[callable] = None,
         **kwargs
@@ -47,7 +46,6 @@ class NGPTClient:
             max_tokens: Maximum number of tokens to generate
             top_p: Controls diversity via nucleus sampling
             messages: Optional list of message objects to override default behavior
-            web_search: Whether to enable web search capability
             markdown_format: If True, allow markdown-formatted responses, otherwise plain text
             stream_callback: Optional callback function for streaming mode updates
             **kwargs: Additional arguments to pass to the API
@@ -75,10 +73,6 @@ class NGPTClient:
             "top_p": top_p,
         }
 
-        # Conditionally add web_search
-        if web_search:
-            payload["web_search"] = True
-
         # Add max_tokens if provided
         if max_tokens is not None:
             payload["max_tokens"] = max_tokens
@@ -180,7 +174,6 @@ class NGPTClient:
     def generate_shell_command(
         self,
         prompt: str,
-        web_search: bool = False,
         temperature: float = 0.4,
         top_p: float = 0.95,
         max_tokens: Optional[int] = None
@@ -190,7 +183,6 @@ class NGPTClient:
 
         Args:
             prompt: Description of the command to generate
-            web_search: Whether to enable web search capability
             temperature: Controls randomness in the response
             top_p: Controls diversity via nucleus sampling
             max_tokens: Maximum number of tokens to generate
@@ -241,7 +233,6 @@ Command:"""
             prompt=prompt,
             stream=False,
             messages=messages,
-            web_search=web_search,
             temperature=temperature,
             top_p=top_p,
             max_tokens=max_tokens
@@ -254,7 +245,6 @@ Command:"""
         self,
         prompt: str,
         language: str = "python",
-        web_search: bool = False,
         temperature: float = 0.4,
         top_p: float = 0.95,
         max_tokens: Optional[int] = None,
@@ -268,7 +258,6 @@ Command:"""
         Args:
             prompt: Description of the code to generate
             language: Programming language to generate code in
-            web_search: Whether to enable web search capability
             temperature: Controls randomness in the response
             top_p: Controls diversity via nucleus sampling
             max_tokens: Maximum number of tokens to generate
@@ -315,7 +304,6 @@ Code:"""
             prompt=prompt,
             stream=stream,
             messages=messages,
-            web_search=web_search,
             temperature=temperature,
             top_p=top_p,
             max_tokens=max_tokens,
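
For library users, the ngpt/client.py hunks above amount to a breaking change in 3.3.0: NGPTClient.chat, generate_code, and generate_shell_command no longer accept a web_search keyword, and the flag is no longer forwarded in the request payload. Callers are now expected to enhance the prompt themselves before invoking the client, as the CLI modes do. A minimal migration sketch; ask_with_web_search is an illustrative helper (not part of the package), the client instance is assumed to be constructed elsewhere, and only keyword arguments visible in this diff are used:

from ngpt.utils import enhance_prompt_with_web_search

def ask_with_web_search(client, prompt: str):
    """Pre-3.3.0 code passed web_search=True to client.chat(); in 3.3.0 the prompt is enhanced first."""
    try:
        # Same call pattern the CLI modes use above
        prompt = enhance_prompt_with_web_search(prompt)
    except Exception as e:
        # Mirror the CLI behaviour: fall back to the original prompt if the search fails
        print(f"Warning: failed to enhance prompt with web search: {e}")
    # chat() keyword arguments below are the ones shown in this diff
    return client.chat(prompt, stream=False, temperature=0.7, top_p=1.0)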

{ngpt-3.2.0 → ngpt-3.3.0}/ngpt/utils/__init__.py

@@ -22,6 +22,11 @@ from .cli_config import (
     get_cli_config_dir,
     get_cli_config_path
 )
+from .web_search import (
+    enhance_prompt_with_web_search,
+    get_web_search_results,
+    format_web_search_results_for_prompt
+)
 
 __all__ = [
     "create_logger", "Logger",
@@ -29,5 +34,6 @@ __all__ = [
     "add_config_entry", "remove_config_entry", "DEFAULT_CONFIG", "DEFAULT_CONFIG_ENTRY",
     "load_cli_config", "set_cli_config_option", "get_cli_config_option",
     "unset_cli_config_option", "apply_cli_config", "list_cli_config_options",
-    "CLI_CONFIG_OPTIONS", "get_cli_config_dir", "get_cli_config_path"
+    "CLI_CONFIG_OPTIONS", "get_cli_config_dir", "get_cli_config_path",
+    "enhance_prompt_with_web_search", "get_web_search_results", "format_web_search_results_for_prompt"
 ]
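
The final hunk re-exports the new web-search helpers from ngpt.utils, so the pipeline added in ngpt/utils/web_search.py is available to library code as well as the CLI. A small sketch of standalone use; enhance_prompt_with_web_search is called the same way the CLI modes call it, while the argument shapes for get_web_search_results and format_web_search_results_for_prompt are assumptions, since only their names appear in this diff:

from ngpt.utils import (
    enhance_prompt_with_web_search,
    get_web_search_results,
    format_web_search_results_for_prompt,
)

query = "What's the latest news about AI?"

# One-step helper used by the CLI modes: returns the prompt augmented with search context.
print(enhance_prompt_with_web_search(query))

# Two-step flow (argument shapes assumed): fetch raw results, then format them for a prompt.
results = get_web_search_results(query)
print(format_web_search_results_for_prompt(results))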