webscout-4.8-py3-none-any.whl → webscout-4.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -0,0 +1,202 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+
+ class XDASH(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates XDASH
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://www.xdash.ai/api/query"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "cookie": "cf_clearance=73aup_8JU0LU.tRr7D4qd4Kt7gapKFi3RVW8jLzQoP0-1723549451-1.0.1.1-HTRrjMvM5GRLsfCTB0v3N_UxQzQMfA1fvOSf0dsZJ73HR6.IUTH8BH.G1dpx3s_IxVHCBCHMXOCt0K7vyIwMgw",
+             "dnt": "1",
+             "origin": "https://www.xdash.ai",
+             "priority": "u=1, i",
+             "referer": "https://www.xdash.ai/search",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "query": conversation_prompt,
+             "search_uuid": uuid.uuid4().hex,
+             "visitor_uuid": uuid.uuid4().hex,
+             "token": uuid.uuid4().hex
+         }
+
+         response = self.session.post(
+             self.chat_endpoint, json=payload, timeout=self.timeout
+         )
+         if not response.ok:
+             raise exceptions.FailedToGenerateResponseError(
+                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+             )
+
+         # Extract the LLM response
+         llm_response = response.text.split("__LLM_RESPONSE__")[1].split("__RELATED_QUESTIONS__")[0].strip()
+         resp = dict(text=llm_response)
+         self.last_response.update(resp)
+         self.conversation.update_chat_history(
+             prompt, self.get_message(self.last_response)
+         )
+         return self.last_response
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         return self.get_message(
+             self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = XDASH()
+     response = ai.chat(input(">>> "))
+     print(response)
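
For orientation, here is a minimal usage sketch of the new XDASH provider above. It assumes webscout 4.9 is installed and that `XDASH` is re-exported at the package root, as the updated README's provider list suggests; if not, it would need to be imported from its provider module.

```python
# Hedged sketch of the XDASH provider added in this release; the
# root-level import is an assumption based on the README's provider list.
from webscout import XDASH

ai = XDASH(timeout=60)

# chat() returns the reply text extracted between the __LLM_RESPONSE__
# and __RELATED_QUESTIONS__ markers as a plain string.
print(ai.chat("What is HelpingAI-9B?"))

# ask() returns the underlying dict, e.g. {"text": "..."}.
print(ai.ask("Summarize that in one sentence.")["text"])
```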
@@ -0,0 +1,258 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ import cloudscraper
+
+ class YEPCHAT(Provider):
+     """
+     This class provides methods for interacting with the Yep.com chat API in a consistent provider structure.
+     """
+
+     AVAILABLE_MODELS = [
+         "Mixtral-8x7B-Instruct-v0.1" # Add other available models here
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 1280,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Mixtral-8x7B-Instruct-v0.1",
+         temperature: float = 0.6,
+         top_p: float = 0.7,
+     ):
+         """Instantiates YEPCHAT
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 1280.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use for generating text. Defaults to "Mixtral-8x7B-Instruct-v0.1".
+             temperature (float, optional): Temperature parameter for the model. Defaults to 0.6.
+             top_p (float, optional): Top_p parameter for the model. Defaults to 0.7.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = cloudscraper.create_scraper() # Create a Cloudscraper session
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.top_p = top_p
+         self.headers = {
+             'Accept': '*/*',
+             'Accept-Encoding': 'gzip, deflate, br, zstd',
+             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'Content-Type': 'application/json; charset=utf-8',
+             'DNT': '1',
+             'Origin': 'https://yep.com',
+             'Referer': 'https://yep.com/',
+             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             'Sec-CH-UA-Mobile': '?0',
+             'Sec-CH-UA-Platform': '"Windows"',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0'
+         }
+         self.cookies = {
+             '__Host-session': uuid4().hex,
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         data = {
+             "stream": stream,
+             "max_tokens": self.max_tokens_to_sample,
+             "top_p": self.top_p,
+             "temperature": self.temperature,
+             "messages": [
+                 {"content": conversation_prompt, "role": "user"}
+             ],
+             "model": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     if line.startswith("data: "):
+                         json_data = line[6:]
+                         if json_data == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_data)
+                             content = data["choices"][0]["delta"].get("content", "")
+                             streaming_response += content
+                             yield content if raw else dict(text=streaming_response)
+                         except json.decoder.JSONDecodeError:
+                             continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             response = self.session.post(
+                 self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             resp = response.json()
+             self.last_response.update(dict(text=resp['choices'][0]['message']['content']))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = YEPCHAT()
+     response = ai.chat("tell me about india")
+     for chunk in response:
+         print(chunk, end="", flush=True)
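
Similarly, a hedged sketch of YEPCHAT's two call styles, again assuming a root-level export. Note that when streaming, each value yielded by `chat(..., stream=True)` is the accumulated text so far (`ask()` appends every SSE delta to `streaming_response` before yielding), so a consumer should print only the new tail:

```python
# Hedged sketch of the YEPCHAT provider; the root-level import is an
# assumption based on the README's provider list.
from webscout import YEPCHAT

bot = YEPCHAT(temperature=0.6, top_p=0.7)

# Non-streaming: returns the complete response text in one string.
print(bot.chat("Name three Python web frameworks."))

# Streaming: snapshots are cumulative, so print only what is new.
printed = 0
for snapshot in bot.chat("tell me about india", stream=True):
    print(snapshot[printed:], end="", flush=True)
    printed = len(snapshot)
print()
```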
webscout/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .voice import play_audio
  from .websx_search import WEBSX
  from .LLM import VLM, LLM
  from .YTdownloader import *
-
+ from .Bing_search import *
  import g4f
  from .YTdownloader import *
  from .Provider import *
@@ -55,64 +55,6 @@ gpt4free_providers = [

  available_providers = webai + gpt4free_providers

- # Add all the provider classes, Localai models, Thread, and Model to __all__
- __all__ = [
-     "WEBS",
-     "AsyncWEBS",
-     "__version__",
-     "DWEBS",
-     "transcriber",
-     "play_audio",
-     "TempMailClient",
-     "TemporaryPhoneNumber",
-     "LLM",
-     "YTdownloader",
-     "WEBSX",
-     "VLM",
-     # Localai models and utilities
-     # "Model",
-     # "Thread",
-     # "formats",
-
-     # AI Providers
-     'ThinkAnyAI',
-     'Xjai',
-     'LLAMA2',
-     'AsyncLLAMA2',
-     'Cohere',
-     'REKA',
-     'GROQ',
-     'AsyncGROQ',
-     'OPENAI',
-     'AsyncOPENAI',
-     'LEO',
-     'AsyncLEO',
-     'KOBOLDAI',
-     'AsyncKOBOLDAI',
-     'OPENGPT',
-     'AsyncOPENGPT',
-     'PERPLEXITY',
-     'BLACKBOXAI',
-     'AsyncBLACKBOXAI',
-     'PhindSearch',
-     'AsyncPhindSearch',
-     'YEPCHAT',
-     'AsyncYEPCHAT',
-     'YouChat',
-     'GEMINI',
-     'Berlin4h',
-     'ChatGPTUK',
-     'POE',
-     'BasedGPT',
-     'DeepSeek',
-     'DeepInfra',
-     'VLM',
-     'AsyncDeepInfra',
-     'VTLchat',
-     'AsyncPhindv2',
-     'Phindv2',
-     'OPENGPTv2',
- ]

  import logging
  logging.getLogger("webscout").addHandler(logging.NullHandler())
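
The practical effect of the hunk above is that `webscout/__init__.py` no longer declares an explicit `__all__`; the package's public surface is now whatever the wildcard imports (`from .Provider import *`, the new `from .Bing_search import *`, and so on) happen to expose. A quick check of that effect, assuming webscout 4.9:

```python
import webscout

# Names formerly pinned in __all__ should still resolve via the wildcard
# imports, while the explicit export list itself is gone (both lines are
# expectations under this change, not guarantees).
print(hasattr(webscout, "YEPCHAT"))   # expected: True
print(hasattr(webscout, "__all__"))   # expected: False
```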
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 4.8
+ Version: 4.9
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -64,6 +64,8 @@ Requires-Dist: yaspin
  Requires-Dist: pillow
  Requires-Dist: requests-html
  Requires-Dist: bson
+ Requires-Dist: cloudscraper
+ Requires-Dist: emoji
  Provides-Extra: dev
  Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
  Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -208,7 +210,7 @@ python -m webscout --help


  [Go To TOP](#TOP)
- ## YTdownloader -webscout can now download yt videos
+ ## YTdownloader

  ```python
  from os import rename, getcwd
@@ -232,7 +234,7 @@ if __name__ == "__main__":
      download_video("https://www.youtube.com/watch?v=c0tMvzB0OKw")
  ```

- ## Weather - webscout can now forcast weather
+ ## Weather
  1. weather
  ```python
  from webscout import weather as w
@@ -331,6 +333,7 @@ async def main() -> None:
  if __name__ == '__main__':
      asyncio.run(main())
  ```
+
  ## Transcriber
  The transcriber function in webscout is a handy tool that transcribes YouTube videos. Here's an example code demonstrating its usage:
  ```python
@@ -387,77 +390,25 @@ if __name__ == "__main__":
      main()
  ```

- ## DWEBS: Advanced Web Searches
-
- `DWEBS` is a standalone feature designed to perform advanced web searches with enhanced capabilities. It is particularly powerful in extracting relevant information directly from webpages and Search engine, focusing exclusively on text (web) searches. Unlike the `WEBS` , which provides a broader range of search functionalities, `DWEBS` is specifically tailored for in-depth web searches.
-
- ### Activating DWEBS
-
- To utilize the `DWEBS` feature, you must first create an instance of the `DWEBS` . This is designed to be used independently of the `WEBS` , offering a focused approach to web searches.
-
- ### Point to remember before using `DWEBS`
- As `DWEBS` is designed to extract relevant information directly from webpages and Search engine, It extracts html from webpages and saves them to folder named files
-
- ### Usage Example
-
- Here's a basic example of how to use the `DWEBS` :
+ ## GoogleS -- formerly DWEBS
  ```python
- from webscout import DWEBS
-
- def finalextractor(extract_webpage=True):
-     print('---------------Here Running for GoogleSearch--------------------')
-     # 1. Google Search
-     google_searcher = DWEBS.GoogleSearcher()
-     query_html_path = google_searcher.search(
-         query='HelpingAI-9B',
-         result_num=10,
-         safe=False,
-         overwrite=False,
-     )
-
-     # 2. Search Result Extraction
-     query_results_extractor = DWEBS.QueryResultsExtractor()
-     query_search_results = query_results_extractor.extract(query_html_path)
-
-     if extract_webpage:
-         print('---------------Batch Webpage Fetcher--------------------')
-         # 3. Batch Webpage Fetching
-         batch_webpage_fetcher = DWEBS.BatchWebpageFetcher()
-         urls = [query_extracts['url'] for query_extracts in query_search_results['query_results']]
-         url_and_html_path_list = batch_webpage_fetcher.fetch(
-             urls,
-             overwrite=False,
-             output_parent=query_search_results["query"],
-         )
-
-         print('---------------Batch Webpage Extractor--------------------')
-         # 4. Batch Webpage Content Extraction
-         batch_webpage_content_extractor = DWEBS.BatchWebpageContentExtractor()
-         webpageurls = [url_and_html['html_path'] for url_and_html in url_and_html_path_list]
-         html_path_and_extracted_content_list = batch_webpage_content_extractor.extract(webpageurls)
-
-         # 5. Printing Extracted Content
-         for html_path_and_extracted_content in html_path_and_extracted_content_list:
-             print(html_path_and_extracted_content['extracted_content'])
-     else:
-         # Print only search results if extract_webpage is False
-         for result in query_search_results['query_results']:
-             DWEBS.logger.mesg(
-                 f"{result['title']}\n"
-                 f" - {result['site']}\n"
-                 f" - {result['url']}\n"
-                 f" - {result['abstract']}\n"
-                 f"\n"
-             )
-
-     DWEBS.logger.success(f"- {len(query_search_results['query_results'])} query results")
-     DWEBS.logger.success(f"- {len(query_search_results['related_questions'])} related questions")
-
- # Example usage:
- finalextractor(extract_webpage=True) # Extract webpage content
- finalextractor(extract_webpage=False) # Skip webpage extraction and print search results only
-
+ from webscout import GoogleS
+ from rich import print
+ searcher = GoogleS()
+ results = searcher.search("HelpingAI-9B", max_results=20, extract_webpage_text=False, max_extract_characters=100)
+ for result in results:
+     print(result)
  ```
+ ### BingS
+ ```python
+ from webscout import BingS
+ from rich import print
+ searcher = BingS()
+ results = searcher.search("Python development tools", max_results=30)
+ for result in results:
+     print(result)
+ ```
+
  ## Text-to-Speech:
  ```python
  from webscout import play_audio
@@ -1406,7 +1357,7 @@ from webscout import AndiSearch
  a = AndiSearch()
  print(a.chat("HelpingAI-9B"))
  ```
- ### Function calling-bete
+ ### Function calling-beta
  ```python
  import json
  import logging
@@ -1519,7 +1470,7 @@ if "error" not in function_call_data:
  else:
      print(f"Error: {function_call_data['error']}")
  ```
- ### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle
+ ### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek,
  code similar to other provider
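
These providers follow the same `Provider` interface shown in the hunks above (`ask`, `chat`, `get_message`), so one hedged sketch covers them all; `Felo` below stands in for any name from the list:

```python
from webscout import Felo  # swap in any provider named above

ai = Felo()
print(ai.chat("hello"))
```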
  ### `LLM`
  ```python