webscout 1.4.3__py3-none-any.whl → 1.4.4__py3-none-any.whl

This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of webscout might be problematic.

webscout/AI.py CHANGED
@@ -25,9 +25,259 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from Helpingai_T2 import Perplexity
  from webscout import exceptions
- from typing import Any, AsyncGenerator
+ from typing import Any, AsyncGenerator, Dict
  import logging
  import httpx
+ #------------------------------------ThinkAnyAI------------
+ class ThinkAnyAI(Provider):
+     def __init__(
+         self,
+         model: str = "claude-3-haiku",
+         locale: str = "en",
+         web_search: bool = False,
+         chunk_size: int = 1,
+         streaming: bool = True,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Initializes ThinkAnyAI
+
+         Args:
+             model (str): The AI model to be used for generating responses. Defaults to "claude-3-haiku".
+             locale (str): The language locale. Defaults to "en" (English).
+             web_search (bool): Whether to include web search results in the response. Defaults to False.
+             chunk_size (int): The size of data chunks when streaming responses. Defaults to 1.
+             streaming (bool): Whether to stream response data. Defaults to True.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.base_url = "https://thinkany.ai/api"
+         self.model = model
+         self.locale = locale
+         self.web_search = web_search
+         self.chunk_size = chunk_size
+         self.streaming = streaming
+         self.last_response = {}
+         self.session = requests.Session()
+         self.session.proxies = proxies
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, max_tokens, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict | AsyncGenerator:
+         """Chat with AI asynchronously.
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defeaults to None
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
+             "conversation_id": "c_f13f6217f9a997aa",
+             "response_id": "r_d3665f95975c368f",
+             "factualityQueries": null,
+             "textQuery": [
+                 "hello there",
+                 1
+             ],
+             "choices": [
+                 {
+                     "id": "rc_ea075c9671bfd8cb",
+                     "content": [
+                         "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
+                     ]
+                 },
+                 {
+                     "id": "rc_de6dd3fb793a5402",
+                     "content": [
+                         "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
+                     ]
+                 },
+                 {
+                     "id": "rc_a672ac089caf32db",
+                     "content": [
+                         "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
+                     ]
+                 }
+             ],
+
+             "images": [
+                 "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
+             ]
+         }
+
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         def initiate_conversation(query: str) -> str:
+             """
+             Initiates a new conversation with the ThinkAny AI API.
+
+             Args:
+                 query (str): The initial query to start the conversation.
+
+             Returns:
+                 str: The UUID (Unique Identifier) of the conversation.
+             """
+             url = f"{self.base_url}/new-conversation"
+             payload = {
+                 "content": query,
+                 "locale": self.locale,
+                 "mode": "search" if self.web_search else "chat",
+                 "model": self.model,
+                 "source": "all",
+             }
+             response = self.session.post(url, json=payload)
+             return response.json().get("data", {}).get("uuid", "DevsDoCode")
+
+         def RAG_search(uuid: str) -> tuple[bool, list]:
+             """
+             Performs a web search using the Retrieve And Generate (RAG) model.
+
+             Args:
+                 uuid (str): The UUID of the conversation.
+
+             Returns:
+                 tuple: A tuple containing a boolean indicating the success of the search
+                     and a list of search result links.
+             """
+             if not self.web_search:
+                 return True, []
+             url = f"{self.base_url}/rag-search"
+             payload = {"conv_uuid": uuid}
+             response = self.session.post(url, json=payload)
+             links = [source["link"] for source in response.json().get("data", [])]
+             return response.json().get("message", "").strip(), links
+
+         def for_stream():
+             conversation_uuid = initiate_conversation(conversation_prompt)
+             web_search_result, links = RAG_search(conversation_uuid)
+             if not web_search_result:
+                 print("Failed to generate WEB response. Making normal Query...")
+
+             url = f"{self.base_url}/chat"
+             payload = {
+                 "role": "user",
+                 "content": prompt,
+                 "conv_uuid": conversation_uuid,
+                 "model": self.model,
+             }
+             response = self.session.post(url, json=payload, stream=True)
+             complete_content = ""
+             for content in response.iter_content(
+                 decode_unicode=True, chunk_size=self.chunk_size
+             ):
+                 complete_content += content
+                 yield content if raw else dict(text=complete_content)
+             self.last_response.update(dict(text=complete_content, links=links))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
  #-----------------------------------------------xjai-------------------------------------------
  class Xjai(Provider):
      def __init__(
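The hunk above adds the complete `ThinkAnyAI` provider. Below is a minimal usage sketch based only on the `chat`/`ask`/`get_message` signatures shown in this hunk; network access to thinkany.ai is assumed and the prompt text is illustrative, not part of the release.

```python
from webscout.AI import ThinkAnyAI

# Defaults come from the constructor above: "claude-3-haiku", locale "en", no web search.
bot = ThinkAnyAI()

# Non-streaming: chat() returns the full reply as a string.
print(bot.chat("Hello there"))

# Streaming: ask() builds dict(text=complete_content) cumulatively, so each item
# yielded by chat(stream=True) is the text accumulated so far; keep the last one.
reply = ""
for partial in bot.chat("Hello there", stream=True):
    reply = partial
print(reply)
```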
@@ -2802,6 +3052,7 @@ class AsyncKOBOLDAI(AsyncProvider):
  class OPENGPT:
      def __init__(
          self,
+         assistant_id,
          is_conversation: bool = True,
          max_tokens: int = 600,
          timeout: int = 30,
@@ -2834,7 +3085,7 @@ class OPENGPT:
          self.stream_chunk_size = 64
          self.timeout = timeout
          self.last_response = {}
-         self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
+         self.assistant_id = assistant_id
          self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"

          self.headers = {
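With the two hunks above, `assistant_id` is no longer hard-coded and must be supplied by the caller. A short sketch of the updated constructor call follows; the UUID is taken from the removed line purely as an illustration, and `chat()` is assumed to behave as in the README example further down in this diff.

```python
from webscout.AI import OPENGPT

# assistant_id is now a required constructor argument (no default in 1.4.4).
opengpt = OPENGPT(assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")
print(opengpt.chat("Hello"))
```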
@@ -3026,7 +3277,6 @@ class OPENGPT:
              str: Message extracted
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["content"]
  class AsyncOPENGPT(AsyncProvider):
      def __init__(
          self,
webscout/AIutel.py CHANGED
@@ -18,6 +18,7 @@ from pathlib import Path
  from playsound import playsound
  from time import sleep as wait
  import pathlib
+ import urllib.parse
  appdir = appdirs.AppDirs("AIWEBS", "vortex")

  default_path = appdir.user_cache_dir
@@ -39,7 +40,8 @@ webai = [
      "cohere",
      "yepchat",
      "you",
-     "xjai"
+     "xjai",
+     "thinkany"
  ]

  gpt4free_providers = [
@@ -938,7 +940,7 @@ class Audio:
          ), f"Voice '{voice}' not one of [{', '.join(cls.all_voices)}]"
          # Base URL for provider API
          url: str = (
-             f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{message}}}"
+             f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{urllib.parse.quote(message)}}}"
          )
          resp = requests.get(url=url, headers=cls.headers, stream=True)
          if not resp.ok:
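The change above percent-encodes the spoken text before it is interpolated into the StreamElements URL. A small illustration of what `urllib.parse.quote` does to characters that would otherwise break the query string (the sample message is made up):

```python
import urllib.parse

message = "Hello there & welcome!"
# Spaces and '&' would otherwise be read as query-string syntax;
# quote() percent-encodes them (along with '!').
print(urllib.parse.quote(message))  # -> Hello%20there%20%26%20welcome%21
```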
webscout/__init__.py CHANGED
@@ -29,7 +29,8 @@ webai = [
      "cohere",
      "yepchat",
      "you",
-     "xjai"
+     "xjai",
+     "thinkany"
  ]

  gpt4free_providers = [
webscout/utils.py CHANGED
@@ -4,7 +4,6 @@ from html import unescape
  from math import atan2, cos, radians, sin, sqrt
  from typing import Any, Dict, List, Union
  from urllib.parse import unquote
-
  import orjson

  from .exceptions import WebscoutE
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "1.4.3"
+ __version__ = "1.4.4"

webscout/webai.py CHANGED
@@ -471,6 +471,20 @@ class Main(cmd.Cmd):
                  history_offset=history_offset,
                  act=awesome_prompt,
              )
+         elif provider == "thinkany":
+             from webscout.AI import ThinkAnyAI
+
+             self.bot = ThinkAnyAI(
+                 is_conversation=disable_conversation,
+                 max_tokens=max_tokens,
+                 timeout=timeout,
+                 intro=intro,
+                 filepath=filepath,
+                 update_file=update_file,
+                 proxies=proxies,
+                 history_offset=history_offset,
+                 act=awesome_prompt,
+             )
          elif provider == "yepchat":
              from webscout.AI import YEPCHAT

@@ -981,7 +995,7 @@ class Main(cmd.Cmd):
          self.output_bond("Chat History", formatted_history, self.color)
          if click.confirm("Do you wish to save this chat"):
              save_to = click.prompt(
-                 "Enter path/file-name", default="llama-conversation.txt"
+                 "Enter path/file-name", default=f"{self.provider}-chat.txt"
              )
              with open(save_to, "a") as fh:
                  fh.write(history)
webscout-1.4.3.dist-info/METADATA → webscout-1.4.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.4.3
+ Version: 1.4.4
  Summary: Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -54,9 +54,31 @@ Provides-Extra: dev
  Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
  Requires-Dist: pytest >=7.4.2 ; extra == 'dev'

- # webscout
- <p align="center">
+ <div align="center">
+ <!-- Replace `#` with your actual links -->
+ <a href="https://t.me/devsdocode"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+ <a href="https://www.instagram.com/sree.shades_/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
+ <a href="https://www.linkedin.com/in/developer-sreejan/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
+ <a href="https://buymeacoffee.com/devsdocode"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>
+
+ <div align="center">
+ <!-- Replace `#` with your actual links -->
+ <a href="https://youtube.com/@@OEvortex">&#10148; Vortex's YouTube Channel</a>
+ </div>
+ <div align="center">
+ <a href="https://youtube.com/@devsdocode">&#10148; Devs Do Code's YouTube Channel</a>
+ </div>
+
+

+
+ # WEBSCOUT
+ </div>
+ <p align="center">
+ <div align="center">
+ <img src="https://img.shields.io/badge/WebScout-API-blue?style=for-the-badge&logo=WebScout" alt="WebScout API Badge">
+ </div>
  <a href="#"><img alt="Python version" src="https://img.shields.io/pypi/pyversions/webscout"/></a>
  <a href="https://pepy.tech/project/webscout"><img alt="Downloads" src="https://static.pepy.tech/badge/webscout"></a>

@@ -64,7 +86,7 @@ Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI m


  ## Table of Contents
- - [webscout](#webscout)
+ - [WEBSCOUT](#webscout)
  - [Table of Contents](#table-of-contents)
  - [Install](#install)
  - [CLI version](#cli-version)
@@ -105,6 +127,7 @@ Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI m
  - [10. `Reka` - chat with reka](#10-reka---chat-with-reka)
  - [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
  - [12. `Xjai` - chat with free gpt 3.5](#12-xjai---chat-with-free-gpt-35)
+ - [`ThinkAny` - AI search engine](#thinkany---ai-search-engine)
  - [`LLM`](#llm)
  - [`LLM` with internet](#llm-with-internet)
  - [LLM with deepwebs](#llm-with-deepwebs)
@@ -790,7 +813,7 @@ print(response)
  ```python
  from webscout.AI import OPENGPT

- opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30)
+ opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30, assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")
  while True:
      # Prompt the user for input
      prompt = input("Enter your prompt: ")
@@ -862,7 +885,31 @@ prompt = "Tell me about india"
  response = ai.chat(prompt)
  print(response)
  ```
+ ### `ThinkAny` - AI search engine
+ ```python
+ from webscout.AI import ThinkAnyAI

+ ai = ThinkAnyAI(
+     is_conversation=True,
+     max_tokens=800,
+     timeout=30,
+     intro=None,
+     filepath=None,
+     update_file=True,
+     proxies={},
+     history_offset=10250,
+     act=None,
+     web_search=False,
+ )
+
+ prompt = "what is meaning of life"
+
+ response = ai.ask(prompt)
+
+ # Extract and print the message from the response
+ message = ai.get_message(response)
+ print(message)
+ ```
  ### `LLM`
  ```python
  from webscout.LLM import LLM
@@ -1093,3 +1140,21 @@ if __name__ == "__main__":
  ```shell
  python -m webscout.webai webai --provider "phind" --rawdog
  ```
+
+ <div align="center">
+ <!-- Replace `#` with your actual links -->
+ <a href="https://t.me/devsdocode"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+ <a href="https://www.instagram.com/sree.shades_/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
+ <a href="https://www.linkedin.com/in/developer-sreejan/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
+ <a href="https://buymeacoffee.com/devsdocode"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>
+
+ <div align="center">
+ <!-- Replace `#` with your actual links -->
+ <a href="https://youtube.com/@@OEvortex">&#10148; Vortex's YouTube Channel</a>
+ </div>
+ <div align="center">
+ <a href="https://youtube.com/@devsdocode">&#10148; Devs Do Code's YouTube Channel</a>
+ </div>
+
+
webscout-1.4.3.dist-info/RECORD → webscout-1.4.4.dist-info/RECORD CHANGED
@@ -10,12 +10,12 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=vRB9T3o-nMgrMkG2NPHTDctNeXaPSKCmBXqu
  DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,2472
  DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
- webscout/AI.py,sha256=cC3v-b4uH9h8DKtXrnHNQotiuhzrJk10HNa7_KWtN9E,215473
+ webscout/AI.py,sha256=sQGpHRwikBBXcxWsB-nRCSBG3oYhSVBHUFr3C2dHrv8,226145
  webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
- webscout/AIutel.py,sha256=Mfmw_MYg1f1VJT02wfgqh0zzX-WpVikBG4IT2HlAbVY,33159
+ webscout/AIutel.py,sha256=WJXAUaNK4IQ-txweZhm3scE11b-pK_tlIjS5VWJN8_E,33217
  webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
  webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
- webscout/__init__.py,sha256=7IVxt29I9ZFX4BMdb7oJk5ZmnH1dtJwpS-B6650jFms,1073
+ webscout/__init__.py,sha256=WcRrw6-MIpt_lHtA498MaO5oWuMRkEk5qYH0mVt4_Nc,1090
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
  webscout/async_providers.py,sha256=pPoSdfB_4SlOYcpAtkKIyDtl7sZ9DGgWy5aIBOjBO9Q,971
  webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
@@ -24,15 +24,15 @@ webscout/g4f.py,sha256=F7POjR03ek7eZvcTX-p7gMe1b0nLNoIqF-L_vZwos0c,24489
  webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
  webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
- webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
- webscout/version.py,sha256=gOqqpnGHPIpFBhG81HqrJ8d0hLm31uWy0NtgxrQYAlY,25
+ webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
+ webscout/version.py,sha256=icxTd9Gky-sd7EyeiHD05a_JawvBpOmZOYI-HbNB0jM,25
  webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
- webscout/webai.py,sha256=et6gQFFiU5XQkTNNjrTucSOTEM9PmJFMWPIcWZ9dW_g,82281
+ webscout/webai.py,sha256=Ijnkr8b0mO2I8-mdCZggoIZ5cqMfxaVFpKpeJf7xtTw,82831
  webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
  webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
- webscout-1.4.3.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
- webscout-1.4.3.dist-info/METADATA,sha256=r00ZVw3ZELW_A0WNLxjpyIuU4myUz3KYsvbUd2CZu3I,40471
- webscout-1.4.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- webscout-1.4.3.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
- webscout-1.4.3.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
- webscout-1.4.3.dist-info/RECORD,,
+ webscout-1.4.4.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+ webscout-1.4.4.dist-info/METADATA,sha256=Oy_Blaq8AL9Jd2BclAmCPNdoXxK1Pibo55rB1iiJ9l0,43430
+ webscout-1.4.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ webscout-1.4.4.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+ webscout-1.4.4.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+ webscout-1.4.4.dist-info/RECORD,,