webscout 1.3.5__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

webscout/AI.py CHANGED
@@ -25,6 +25,396 @@ from webscout.AIbase import Provider
 from Helpingai_T2 import Perplexity
 from typing import Any
 import logging
+#-----------------------------------------------Cohere--------------------------------------------
+class Cohere(Provider):
+    def __init__(
+        self,
+        api_key: str,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        model: str = "command-r-plus",
+        temperature: float = 0.7,
+        system_prompt: str = "You are helpful AI",
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        top_k: int = -1,
+        top_p: float = 0.999,
+    ):
+        """Initializes Cohere
+
+        Args:
+            api_key (str): Cohere API key.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            model (str, optional): Model to use for generating text. Defaults to "command-r-plus".
+            temperature (float, optional): Diversity of the generated text. Higher values produce more diverse outputs.
+                Defaults to 0.7.
+            system_prompt (str, optional): A system_prompt or context to set the style or tone of the generated text.
+                Defaults to "You are helpful AI".
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_key = api_key
+        self.model = model
+        self.temperature = temperature
+        self.system_prompt = system_prompt
+        self.chat_endpoint = "https://production.api.os.cohere.ai/coral/v1/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        self.session.headers.update(self.headers)
+        payload = {
+            "message": conversation_prompt,
+            "model": self.model,
+            "temperature": self.temperature,
+            "preamble": self.system_prompt,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    resp = json.loads(value.strip().split("\n")[-1])
+                    self.last_response.update(resp)
+                    yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["result"]["chatStreamEndEvent"]["response"]["text"]
+#-----------------------------------------------REKA-----------------------------------------------
+class REKA(Provider):
+    def __init__(
+        self,
+        api_key: str,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "reka-core",
+        system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
+        use_search_engine: bool = False,
+        use_code_interpreter: bool = False,
+    ):
+        """Instantiates REKA
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): REKA model name. Defaults to "reka-core".
+            system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
+            use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
+            use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chat.reka.ai/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.use_search_engine = use_search_engine
+        self.use_code_interpreter = use_code_interpreter
+        self.access_token = api_key
+        self.headers = {
+            "Authorization": f"Bearer {self.access_token}",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+
+            "conversation_history": [
+                {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
+            ],
+
+            "stream": stream,
+            "use_search_engine": self.use_search_engine,
+            "use_code_interpreter": self.use_code_interpreter,
+            "model_name": self.model,
+            # "model_name": "reka-flash",
+            # "model_name": "reka-edge",
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    resp = json.loads(value)
+                    self.last_response.update(resp)
+                    yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text")
+#-----------------------------------------------GROQ-----------------------------------------------
 class GROQ(Provider):
     def __init__(
         self,
@@ -267,8 +657,7 @@ class GROQ(Provider):
             return response["choices"][0]["message"]["content"]
         except KeyError:
             return ""
-#----------------------------------------------------------Sean-----------------------------------------------------------
-class Sean:
+#----------------------------------------------------------Sean-------------------------------------class Sean:
     def __init__(
         self,
        is_conversation: bool = True,
@@ -496,7 +885,7 @@ class Sean:
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["content"]
-#----------------------------------------------------------OpenAI-----------------------------------------------------------
+#----------------------------------------------------------OpenAI-----------------------------------
 class OPENAI(Provider):
     model = "gpt-3.5-turbo"
     def __init__(
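
For orientation, the two classes added above implement the same `Provider` interface as the existing ones. Below is a minimal usage sketch (not part of the diff) assembled from the README examples introduced later in this release; the API keys are placeholders:

```python
# Minimal sketch, assembled from the README examples added in this release.
# The api_key values are placeholders; supply real keys for each service.
from webscout.AI import Cohere, REKA

cohere_bot = Cohere(api_key="YOUR_COHERE_KEY", is_conversation=True, max_tokens=600)
print(cohere_bot.chat("tell me about india"))  # chat() returns the reply text

reka_bot = REKA(api_key="YOUR_REKA_KEY", model="reka-core")
# With stream=True, chat() yields the text extracted from each streamed JSON chunk.
for chunk in reka_bot.chat("tell me about india", stream=True):
    print(chunk, end="", flush=True)
```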
webscout/AIutel.py CHANGED
@@ -29,6 +29,8 @@ webai = [
     "perplexity",
     "sean",
     "groq",
+    "reka",
+    "cohere"
 ]
 
 gpt4free_providers = [
webscout/__init__.py CHANGED
@@ -26,6 +26,8 @@ webai = [
     "perplexity",
     "sean",
     "groq",
+    "reka",
+    "cohere"
 ]
 
 gpt4free_providers = [
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.3.5"
+__version__ = "1.3.6"
 
webscout/webai.py CHANGED
@@ -509,6 +509,44 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
+        elif provider == "cohere":
+            assert auth, (
+                "Cohere's API-key is required. Use the flag `--key` or `-k`"
+            )
+            from webscout.AI import Cohere
+            self.bot = Cohere(
+                api_key=auth,
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_k=top_k,
+                top_p=top_p,
+                model=getOr(model, "command-r-plus"),
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
+        elif provider == "reka":
+            from webscout.AI import REKA
+
+            self.bot = REKA(
+                api_key=auth,
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+                model=getOr(model, "reka-core"),
+                # quiet=quiet,
+            )
 
         elif provider == "koboldai":
             from webscout.AI import KOBOLDAI
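
These branches wire the new providers into the terminal client, so "cohere" and "reka" become valid `--provider` values (matching the `webai` list additions above). Below is a rough sketch (not part of the diff) of the equivalent programmatic call, patterned on the rawdog example in the updated README further down; the auth value is a placeholder, and `Main` may accept more options than the README excerpt shows:

```python
# Hedged sketch, patterned on the README's rawdog example from this same diff.
# The auth value is a placeholder; the cohere branch asserts a key was passed.
from webscout.webai import Main

webai_bot = Main(
    max_tokens=500,
    provider="cohere",        # or "reka"
    temperature=0.7,
    top_k=40,
    top_p=0.95,
    model="command-r-plus",   # the CLI default via getOr(model, "command-r-plus")
    auth="YOUR_COHERE_KEY",
    timeout=30,
    disable_conversation=True,
    filepath=None,
)
```

From the shell, the equivalent would be `python -m webscout.webai webai --provider "cohere" --key "YOUR_COHERE_KEY"`, mirroring the phind invocation shown in the README.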
webscout-1.3.5.dist-info/METADATA → webscout-1.3.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.3.5
+Version: 1.3.6
 Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -97,11 +97,12 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
 - [9. `KOBOLDIA` -](#9-koboldia--)
 - [10. `Sean` - chat With Sean](#10-sean---chat-with-sean)
+- [11. `Reka` - chat with reka](#11-reka---chat-with-reka)
+- [12. `Cohere` - chat with cohere](#12-cohere---chat-with-cohere)
 - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
 - [`LLM`](#llm)
 - [`LLM` with internet](#llm-with-internet)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
-- [for using as terminal gpt](#for-using-as-terminal-gpt)
 
 ## Install
 ```python
@@ -698,6 +699,27 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
+### 11. `Reka` - chat with reka
+```python
+from webscout.AI import REKA
+
+a = REKA(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+prompt = "tell me about india"
+response_str = a.chat(prompt)
+print(response_str)
+```
+
+### 12. `Cohere` - chat with cohere
+```python
+from webscout.AI import Cohere
+
+a = Cohere(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+prompt = "tell me about india"
+response_str = a.chat(prompt)
+print(response_str)
+```
 ## usage of special .LLM file from webscout (webscout.LLM)
 
 ### `LLM`
@@ -816,12 +838,12 @@ def use_rawdog_with_webai(prompt):
     try:
         webai_bot = Main(
             max_tokens=500,
-            provider="phind",
+            provider="cohere",
             temperature=0.7,
             top_k=40,
             top_p=0.95,
-            model="Phind Model", # Replace with your desired model
-            auth=None, # Replace with your auth key/value (if needed)
+            model="command-r-plus", # Replace with your desired model
+            auth="0zoQbKs1AAgd8WrPBO9CTIGgVvm5ZMbDcCqJOVyl", # Replace with your auth key/value (if needed)
             timeout=30,
             disable_conversation=True,
             filepath=None,
@@ -841,11 +863,8 @@ def use_rawdog_with_webai(prompt):
 if __name__ == "__main__":
     user_prompt = input("Enter your prompt: ")
     use_rawdog_with_webai(user_prompt)
+
 ```
 ```shell
 python -m webscout.webai webai --provider "phind" --rawdog
 ```
-### for using as terminal gpt
-```python
-python -m webscout.webai webai --provider "sean"
-```
webscout-1.3.5.dist-info/RECORD → webscout-1.3.6.dist-info/RECORD CHANGED
@@ -10,13 +10,13 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=vRB9T3o-nMgrMkG2NPHTDctNeXaPSKCmBXqu
 DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,2472
 DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
-webscout/AI.py,sha256=iuNRGcRY-StwH0PwwUYNC_JhUKG_yAk8RYOh4-t67Gs,103168
+webscout/AI.py,sha256=rl5-O-stc1FCvKVsvjC1qlbDCTIGW9mYi05fsOWwnYA,119027
 webscout/AIbase.py,sha256=vQi2ougu5bG-QdmoYmxCQsOg7KTEgG7EF6nZh5qqUGw,2343
-webscout/AIutel.py,sha256=1C2HA-xgpW3OalFrI8r6md-uJwzCPvnUG0dBWYOV2KI,24426
+webscout/AIutel.py,sha256=QwOEdsoboyOop2GW7lu8l_ZnrFb9TN8FrWWPsaFNzKU,24453
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/HelpingAI.py,sha256=YeZw0zYVHMcBFFPNdd3_Ghpm9ebt_EScQjHO_IIs4lg,8103
 webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
-webscout/__init__.py,sha256=6l3n5S_nDg1uhoG6eBW7C7Bhb7B23uBqQwNN35E7P3E,1015
+webscout/__init__.py,sha256=yA465j5M4a6u3wtQHGEM57FH5Cb86cAV3Lp0MRcaZkw,1042
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
 webscout/exceptions.py,sha256=4AOO5wexeL96nvUS-badcckcwrPS7UpZyAgB9vknHZE,276
@@ -24,14 +24,14 @@ webscout/g4f.py,sha256=NEZbXOoVfmHiKcSjVpBMNKZzHgbTJLsd8xOXjtn4js4,16358
 webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
-webscout/version.py,sha256=84L_BIC4IlYF8PSO3raVy2A4eNETbCETloSoLV9OffA,25
+webscout/version.py,sha256=lXdfqHqoX9RttSEm10OiusQmVO7SMygzE2W164D7704,25
 webscout/voice.py,sha256=1Ids_2ToPBMX0cH_UyPMkY_6eSE9H4Gazrl0ujPmFag,941
-webscout/webai.py,sha256=ogbIOgLUwGtXg2zdYgJq7H46l8mzfyncWLR-g0vXLcU,74812
+webscout/webai.py,sha256=oQgJVkNHc_BeIQbQG5rGCx1AadMzfQggNGha3IRthRc,76302
 webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
 webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
-webscout-1.3.5.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
-webscout-1.3.5.dist-info/METADATA,sha256=piJwnW1M9ZmLGx7als7tWJyY9X5vZXnBahqapGK3Ju0,31961
-webscout-1.3.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-webscout-1.3.5.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
-webscout-1.3.5.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
-webscout-1.3.5.dist-info/RECORD,,
+webscout-1.3.6.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-1.3.6.dist-info/METADATA,sha256=4WtkJ4GTnb8UrgYyZDINJ9J9dDp-bTjUQtvyjw7yoJM,32467
+webscout-1.3.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-1.3.6.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-1.3.6.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-1.3.6.dist-info/RECORD,,