webscout 8.2.4__py3-none-any.whl → 8.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/typefully.py CHANGED

@@ -1,10 +1,10 @@
-from typing import Union, Any, Dict
+from typing import Optional, Union, Any, Dict
 import re
 from uuid import uuid4
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -114,6 +114,17 @@ class TypefullyAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Typefully stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -174,23 +185,22 @@ class TypefullyAI(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-            streaming_response = ""
-            # Iterate over bytes and decode manually
-            for line_bytes in response.iter_lines():
-                if line_bytes:
-                    line = line_bytes.decode('utf-8') # Decode bytes
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        # Decode potential unicode escapes
-                        content = match.group(1).encode().decode('unicode_escape')
-                        streaming_response += content
-                        # Yield dict or raw string
-                        yield content if raw else dict(text=content)
-                    elif line.startswith('e:') or line.startswith('d:'):
-                        # End of response
-                        break
+            streaming_text = ""
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._typefully_extractor, # Use the specific extractor
+                end_marker="e:", # Stop processing if "e:" line is encountered (adjust if needed)
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield content_chunk if raw else dict(text=content_chunk)
             # Update history and last response after stream finishes
-            self.last_response.update(dict(text=streaming_response))
+            self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
@@ -271,13 +281,12 @@ class TypefullyAI(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         # Handle potential unicode escapes in the final text
+        # Formatting is now handled by the extractor
         text = response.get("text", "")
         try:
-            # Attempt to decode escapes, return original if fails
-            # Already decoded in ask method, just handle formatting
            formatted_text = text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
            return formatted_text
-        except Exception: # Catch potential errors during formatting
+        except Exception: # Catch potential errors during newline replacement
            return text # Return original text if formatting fails
@@ -319,4 +328,3 @@ if __name__ == "__main__":
 
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
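The new _typefully_extractor centralizes the frame parsing that the old streaming loop did inline, so the same logic can serve sanitize_stream and any future non-streaming path. As a standalone sketch of what it accepts and rejects (the sample frames below are illustrative, not captured traffic):

import re

def typefully_extract(line: str):
    """Same regex and decoding as _typefully_extractor above."""
    match = re.search(r'0:"(.*?)"(?=,|$)', line)
    if match:
        # Unicode escapes like \u00e9 arrive literally inside the frame
        content = match.group(1).encode().decode('unicode_escape')
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

print(typefully_extract('0:"Hello, "'))      # Hello,
print(typefully_extract('0:"caf\\u00e9"'))   # café
print(typefully_extract('e:{"done":true}'))  # None (terminator frame; sanitize_stream stops on end_marker="e:")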
webscout/Provider/typegpt.py CHANGED

@@ -5,7 +5,7 @@ from typing import Union, Any, Dict, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -137,37 +137,30 @@ class TypeGPT(Provider):
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
 
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            message_load = ""
-            # Iterate over bytes and decode manually
-            for line_bytes in response.iter_lines():
-                if line_bytes:
-                    line = line_bytes.decode("utf-8")
-                    if line.startswith("data: "):
-                        line = line[6:] # Remove "data: " prefix
-                        # Skip [DONE] message
-                        if line.strip() == "[DONE]":
-                            break
-                        try:
-                            data = json.loads(line)
-                            # Extract and yield only new content
-                            if 'choices' in data and len(data['choices']) > 0:
-                                delta = data['choices'][0].get('delta', {})
-                                if 'content' in delta:
-                                    new_content = delta['content']
-                                    message_load += new_content
-                                    # Yield only the new content
-                                    yield dict(text=new_content) if not raw else new_content
-                                    # Update last_response incrementally for potential non-stream use later
-                                    self.last_response = dict(text=message_load)
-                        except json.JSONDecodeError:
-                            continue
+            response.raise_for_status() # Check for HTTP errors first
+
+            streaming_text = ""
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value="data:",
+                to_json=True, # Stream sends JSON
+                skip_markers=["[DONE]"],
+                content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by the content_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
+                    # Update last_response incrementally
+                    self.last_response = dict(text=streaming_text)
+
             # Update conversation history after stream finishes
-            if message_load: # Only update if something was received
-                self.conversation.update_chat_history(prompt, message_load)
+            if streaming_text: # Only update if something was received
+                self.conversation.update_chat_history(prompt, streaming_text)
@@ -185,26 +178,30 @@ class TypeGPT(Provider):
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
 
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Request failed - {response.status_code}: {response.text}"
-                )
-
+            response.raise_for_status() # Check for HTTP errors
+
             try:
-                # curl_cffi response.json() handles decoding
-                response_data = response.json()
-                # Extract the message content for history and return value
-                if 'choices' in response_data and len(response_data['choices']) > 0:
-                    message = response_data['choices'][0].get('message', {})
-                    content = message.get('content', '')
-                    self.last_response = {"text": content} # Store in expected format
-                    self.conversation.update_chat_history(prompt, content)
-                    return self.last_response
-                else:
-                    # Handle cases where response structure is unexpected
-                    self.last_response = {"text": ""}
-                    return self.last_response
-            except json.JSONDecodeError as je:
+                response_text = response.text # Get raw text
+
+                # Use sanitize_stream for non-streaming JSON response
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    # Extractor for non-stream structure
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
+                )
+
+                # Extract the single result
+                content = ""
+                for extracted_content in processed_stream:
+                    content = extracted_content if isinstance(extracted_content, str) else ""
+
+                self.last_response = {"text": content} # Store in expected format
+                self.conversation.update_chat_history(prompt, content)
+                return self.last_response
+            except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
@@ -290,4 +287,3 @@ if __name__ == "__main__":
 
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
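The sanitize_stream arguments map one-to-one onto the manual SSE loop they replace. A minimal stand-in showing the same semantics (a sketch of the behavior, not the library's implementation; sse_delta_stream and the sample payloads are hypothetical):

import json
from typing import Callable, Iterable, Iterator, Optional

def sse_delta_stream(lines: Iterable[str],
                     extractor: Callable[[dict], Optional[str]]) -> Iterator[str]:
    """Stand-in for sanitize_stream(intro_value="data:", to_json=True,
    skip_markers=["[DONE]"], yield_raw_on_error=False)."""
    for line in lines:
        line = line.strip()
        if line.startswith("data:"):        # intro_value="data:"
            line = line[len("data:"):].strip()
        if not line or line == "[DONE]":    # skip_markers=["[DONE]"]
            continue
        try:
            payload = json.loads(line)      # to_json=True
        except json.JSONDecodeError:
            continue                        # yield_raw_on_error=False drops bad lines
        content = extractor(payload)        # content_extractor=...
        if content:
            yield content

extractor = lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
sample = [
    'data: {"choices":[{"delta":{"content":"Hel"}}]}',
    'data: {"choices":[{"delta":{"content":"lo"}}]}',
    'data: [DONE]',
]
print("".join(sse_delta_stream(sample, extractor)))  # Hello

Note that the per-chunk self.last_response update in the new loop preserves the old incremental behavior for callers that read it mid-stream.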
webscout/Provider/uncovr.py CHANGED

@@ -5,7 +5,7 @@ import uuid
 import re
 from typing import Any, Dict, Optional, Generator, Union
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -109,6 +109,17 @@ class UncovrAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the UncovrAI stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: with optional quotes
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def refresh_identity(self, browser: str = None):
         """
         Refreshes the browser identity fingerprint.
@@ -202,27 +213,21 @@ class UncovrAI(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Request failed with status code {response.status_code} - {response.text}"
                 )
-
+
             streaming_text = ""
-            # Iterate over bytes and decode manually
-            for line_bytes in response.iter_lines():
-                if line_bytes:
-                    try:
-                        line = line_bytes.decode('utf-8')
-                        # Use regex to match content messages
-                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
-                        if content_match: # Content message
-                            content = content_match.group(1).encode().decode('unicode_escape') # Decode escapes
-                            streaming_text += content
-                            resp = dict(text=content)
-                            yield resp if raw else resp
-                        # Check for error messages
-                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                        if error_match:
-                            error_msg = error_match.group(1)
-                            raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._uncovr_extractor, # Use the specific extractor
+                yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages (adjust if needed)
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield dict(text=content_chunk) if not raw else content_chunk
 
             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
@@ -262,25 +267,25 @@ class UncovrAI(Provider):
                     f"Request failed with status code {response.status_code} - {response.text}"
                 )
 
-            # Process the non-streamed response content (assuming it's similar line format)
+            response_text = response.text # Get the full response text
+
+            # Use sanitize_stream to process the non-streaming text
+            # It won't parse as JSON, but will apply the extractor line by line
+            processed_stream = sanitize_stream(
+                data=response_text.splitlines(), # Split into lines first
+                intro_value=None,
+                to_json=False,
+                content_extractor=self._uncovr_extractor,
+                yield_raw_on_error=True
+            )
+
+            # Aggregate the results from the generator
             full_response = ""
-            # Use response.text which should contain the full body for non-streamed curl_cffi requests
-            for line in response.text.splitlines():
-                if line:
-                    try:
-                        # line is already decoded string
-                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
-                        if content_match:
-                            content = content_match.group(1).encode().decode('unicode_escape') # Decode escapes
-                            full_response += content
-                        # Check for error messages
-                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
-                        if error_match:
-                            error_msg = error_match.group(1)
-                            raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
-                    except (json.JSONDecodeError): # UnicodeDecodeError less likely here
-                        continue
+            for content in processed_stream:
+                if content and isinstance(content, str):
+                    full_response += content
 
+            # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
             return {"text": full_response}
@@ -323,7 +328,9 @@ class UncovrAI(Provider):
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        # Formatting handled by extractor
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
 
 if __name__ == "__main__":
     # Ensure curl_cffi is installed
@@ -359,4 +366,3 @@ if __name__ == "__main__":
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
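One behavioral note on this refactor: the old loop raised FailedToGenerateResponseError when it saw a 2:[{"type":"error",...}] frame, while _uncovr_extractor only handles 0: content frames, so error frames now pass through without raising (the yield_raw_on_error=True comment hints at this). A standalone sketch of both frame shapes using the same regexes (the sample lines are illustrative, not captured traffic):

import re

CONTENT_RE = re.compile(r'^0:\s*"?(.*?)"?$')
ERROR_RE = re.compile(r'^2:\[{"type":"error","error":"(.*?)"}]$')

def parse_uncovr(lines):
    """Aggregate '0:' content frames; surface '2:' error frames
    (the check the sanitize_stream refactor no longer performs)."""
    text = ""
    for line in lines:
        content = CONTENT_RE.match(line)
        if content:
            text += content.group(1).encode().decode('unicode_escape')
            continue
        error = ERROR_RE.match(line)
        if error:
            raise RuntimeError(f"API Error: {error.group(1)}")
    return text

print(parse_uncovr(['0:"Hi "', '0:"there"']))  # Hi there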
webscout/__init__.py CHANGED

@@ -5,7 +5,6 @@ from .webscout_search_async import AsyncWEBS
 from .version import __version__
 from .DWEBS import *
 from .tempid import *
-from .LLM import VLM, LLM
 from .Provider import *
 from .Provider.TTI import *
 from .Provider.TTS import *
webscout/cli.py CHANGED

@@ -1,6 +1,8 @@
 import sys
 from .swiftcli import CLI, option
 from .webscout_search import WEBS
+from .DWEBS import GoogleSearch # Import GoogleSearch from DWEBS
+from .yep_search import YepSearch # Import YepSearch from yep_search
 from .version import __version__
 
 
@@ -282,6 +284,260 @@ def weather(location: str, language: str, proxy: str = None, timeout: int = 10):
     except Exception as e:
         raise e
 
+@app.command()
+@option("--keywords", "-k", help="Search keywords", required=True)
+@option("--region", "-r", help="Region for search results (ISO country code)", default="all")
+@option("--safesearch", "-s", help="SafeSearch setting (on, moderate, off)", default="moderate")
+@option("--max-results", "-m", help="Maximum number of results", type=int, default=10)
+@option("--start-num", "-start", help="Starting position for pagination", type=int, default=0)
+@option("--unique", "-u", help="Filter duplicate results", type=bool, default=True)
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=10)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def google_text(
+    keywords: str,
+    region: str,
+    safesearch: str,
+    max_results: int,
+    start_num: int,
+    unique: bool,
+    timeout: int = 10,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Perform a text search using Google Search."""
+    google = GoogleSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        lang="en",
+        sleep_interval=0.0,
+        impersonate=impersonate
+    )
+
+    try:
+        results = google.text(
+            keywords=keywords,
+            region=region,
+            safesearch=safesearch,
+            max_results=max_results,
+            start_num=start_num,
+            unique=unique
+        )
+
+        # Convert SearchResult objects to dictionaries for printing
+        formatted_results = []
+        for result in results:
+            result_dict = {
+                "title": result.title,
+                "url": result.url,
+                "description": result.description,
+            }
+            # Add any metadata to the result dictionary
+            for k, v in result.metadata.items():
+                result_dict[k] = v
+
+            formatted_results.append(result_dict)
+
+        _print_data(formatted_results)
+    except Exception as e:
+        raise e
+
+@app.command()
+@option("--keywords", "-k", help="Search keywords", required=True)
+@option("--region", "-r", help="Region for search results (ISO country code)", default="all")
+@option("--safesearch", "-s", help="SafeSearch setting (on, moderate, off)", default="moderate")
+@option("--max-results", "-m", help="Maximum number of results", type=int, default=10)
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=10)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def google_news(
+    keywords: str,
+    region: str,
+    safesearch: str,
+    max_results: int,
+    timeout: int = 10,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Perform a news search using Google Search."""
+    google = GoogleSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        lang="en",
+        sleep_interval=0.0,
+        impersonate=impersonate
+    )
+
+    try:
+        results = google.news(
+            keywords=keywords,
+            region=region,
+            safesearch=safesearch,
+            max_results=max_results
+        )
+
+        # Convert SearchResult objects to dictionaries for printing
+        formatted_results = []
+        for result in results:
+            result_dict = {
+                "title": result.title,
+                "url": result.url,
+                "description": result.description,
+            }
+            # Add any metadata to the result dictionary
+            for k, v in result.metadata.items():
+                result_dict[k] = v
+
+            formatted_results.append(result_dict)
+
+        _print_data(formatted_results)
+    except Exception as e:
+        raise e
+
+@app.command()
+@option("--query", "-q", help="Search query", required=True)
+@option("--region", "-r", help="Region for suggestions (ISO country code)", default="all")
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=10)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def google_suggestions(
+    query: str,
+    region: str,
+    timeout: int = 10,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Get search suggestions from Google Search."""
+    google = GoogleSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        lang="en",
+        sleep_interval=0.0,
+        impersonate=impersonate
+    )
+
+    try:
+        results = google.suggestions(query=query, region=region)
+
+        # Format suggestions for printing
+        formatted_results = []
+        for i, suggestion in enumerate(results, 1):
+            formatted_results.append({"position": i, "suggestion": suggestion})
+
+        _print_data(formatted_results)
+    except Exception as e:
+        raise e
+
+@app.command()
+@option("--keywords", "-k", help="Search keywords", required=True)
+@option("--region", "-r", help="Region for search results", default="all")
+@option("--safesearch", "-s", help="SafeSearch setting (on, moderate, off)", default="moderate")
+@option("--max-results", "-m", help="Maximum number of results", type=int, default=10)
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=20)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def yep_text(
+    keywords: str,
+    region: str,
+    safesearch: str,
+    max_results: int,
+    timeout: int = 20,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Perform a text search using Yep Search."""
+    yep = YepSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        impersonate=impersonate
+    )
+
+    try:
+        results = yep.text(
+            keywords=keywords,
+            region=region,
+            safesearch=safesearch,
+            max_results=max_results
+        )
+
+        _print_data(results)
+    except Exception as e:
+        raise e
+
+@app.command()
+@option("--keywords", "-k", help="Search keywords", required=True)
+@option("--region", "-r", help="Region for search results", default="all")
+@option("--safesearch", "-s", help="SafeSearch setting (on, moderate, off)", default="moderate")
+@option("--max-results", "-m", help="Maximum number of results", type=int, default=10)
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=20)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def yep_images(
+    keywords: str,
+    region: str,
+    safesearch: str,
+    max_results: int,
+    timeout: int = 20,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Perform an image search using Yep Search."""
+    yep = YepSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        impersonate=impersonate
+    )
+
+    try:
+        results = yep.images(
+            keywords=keywords,
+            region=region,
+            safesearch=safesearch,
+            max_results=max_results
+        )
+
+        _print_data(results)
+    except Exception as e:
+        raise e
+
+@app.command()
+@option("--query", "-q", help="Search query", required=True)
+@option("--region", "-r", help="Region for suggestions", default="all")
+@option("--timeout", "-timeout", help="Timeout value for requests", type=int, default=20)
+@option("--proxy", "-p", help="Proxy URL to use for requests")
+@option("--impersonate", "-i", help="Browser to impersonate", default="chrome110")
+def yep_suggestions(
+    query: str,
+    region: str,
+    timeout: int = 20,
+    proxy: str = None,
+    impersonate: str = "chrome110"
+):
+    """Get search suggestions from Yep Search."""
+    yep = YepSearch(
+        timeout=timeout,
+        proxies={"https": proxy, "http": proxy} if proxy else None,
+        verify=True,
+        impersonate=impersonate
+    )
+
+    try:
+        results = yep.suggestions(query=query, region=region)
+
+        # Format suggestions for printing
+        formatted_results = []
+        for i, suggestion in enumerate(results, 1):
+            formatted_results.append({"position": i, "suggestion": suggestion})
+
+        _print_data(formatted_results)
+    except Exception as e:
+        raise e
+
 def main():
     """Main entry point for the CLI."""
     try:
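Programmatically, the new google_text command reduces to the following (a sketch assuming GoogleSearch exposes the constructor and text() signature used in the hunk above; the query string is hypothetical):

from webscout.DWEBS import GoogleSearch

# Mirrors the defaults wired into the new CLI command
google = GoogleSearch(
    timeout=10,
    proxies=None,  # or {"https": proxy, "http": proxy}
    verify=True,
    lang="en",
    sleep_interval=0.0,
    impersonate="chrome110",
)

results = google.text(
    keywords="webscout python",  # hypothetical query
    region="all",
    safesearch="moderate",
    max_results=5,
    start_num=0,
    unique=True,
)
for result in results:
    # SearchResult fields the CLI flattens into dictionaries for printing
    print(result.title, result.url, result.description, sep=" | ")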