webscout 3.4__py3-none-any.whl → 3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -0,0 +1,479 @@
+ import time
+ import uuid
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator
+ import logging
+ import httpx
+
+ class DeepInfra(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Qwen/Qwen2-72B-Instruct",
+         system_prompt: str = "You are a Helpful AI."
+     ):
+         """Instantiates DeepInfra
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): DeepInfra model name. Defaults to "Qwen/Qwen2-72B-Instruct".
+             system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+
+         self.headers = {
+             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+             'Cache-Control': 'no-cache',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Pragma': 'no-cache',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'X-Deepinfra-Source': 'web-embed',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"macOS"'
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+         self.session.headers.update(self.headers)
+         payload = {
+             'model': self.model,
+             'messages': [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             'temperature': 0.7,
+             'max_tokens': 8028,
+             'stop': []
+         }
+
+         response = self.session.post(
+             self.chat_endpoint, json=payload, timeout=self.timeout
+         )
+         if not response.ok:
+             raise Exception(
+                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+             )
+
+         resp = response.json()
+         message_load = self.get_message(resp)
+         self.conversation.update_chat_history(
+             prompt, message_load
+         )
+         return resp
+
+     def chat(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         return self.get_message(
+             self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         try:
+             return response["choices"][0]["message"]["content"]
+         except KeyError:
+             return ""
+
+ class AsyncDeepInfra(AsyncProvider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "meta-llama/Meta-Llama-3-70B-Instruct",
+         system_prompt: str = "You are a Helpful AI."
+     ):
+         """Instantiates DeepInfra
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): DeepInfra model name. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
+             system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
+         """
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+
+         self.headers = {
+             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+             'Cache-Control': 'no-cache',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Pragma': 'no-cache',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'X-Deepinfra-Source': 'web-embed',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"macOS"'
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.client = httpx.AsyncClient(proxies=proxies, headers=self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     async def ask(
+         self,
+         prompt: str,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+         payload = {
+             'model': self.model,
+             'messages': [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             'temperature': 0.7,
+             'max_tokens': 8028,
+             'stop': []
+         }
+
+         response = await self.client.post(self.chat_endpoint, json=payload, timeout=self.timeout)
+         if response.status_code != 200:
+             raise Exception(
+                 f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
+             )
+
+         resp = response.json()
+         message_load = self.get_message(resp)
+         self.conversation.update_chat_history(
+             prompt, message_load
+         )
+         return resp
+
+     async def chat(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         return self.get_message(
+             await self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         try:
+             return response["choices"][0]["message"]["content"]
+         except KeyError:
+             return ""
+ import requests
+ import base64
+ from typing import List, Dict, Union, Any
+
+ class VLM:
+     def __init__(
+         self,
+         model: str = "llava-hf/llava-1.5-7b-hf",
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         system_prompt: str = "You are a Helpful AI.",
+         proxies: dict = {}
+     ):
+         """Instantiates VLM
+
+         Args:
+             model (str): VLM model name.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             system_prompt (str, optional): System prompt for VLM. Defaults to "You are a Helpful AI.".
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+         """
+         self.model = model
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.system_prompt = system_prompt
+         self.headers = {
+             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+             'Cache-Control': 'no-cache',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Pragma': 'no-cache',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'X-Deepinfra-Source': 'web-embed',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"macOS"'
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+     def encode_image_to_base64(self, image_path: str) -> str:
+         with open(image_path, "rb") as image_file:
+             return base64.b64encode(image_file.read()).decode("utf-8")
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         try:
+             return response["choices"][0]["message"]["content"]
+         except KeyError:
+             return ""
+
+     def ask(
+         self,
+         prompt: Union[str, Dict[str, str]],
+         raw: bool = False
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+
+         Returns:
+             dict: Response from the API
+         """
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": prompt if isinstance(prompt, str) else prompt['content']}
+         ]
+
+         payload = {
+             'model': self.model,
+             'messages': messages,
+             'temperature': 0.7,
+             'max_tokens': self.max_tokens_to_sample,
+             'stop': [],
+             'stream': False
+         }
+
+         response = self.session.post(
+             "https://api.deepinfra.com/v1/openai/chat/completions",
+             json=payload,
+             timeout=self.timeout
+         )
+         if not response.ok:
+             raise Exception(
+                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+             )
+
+         return response.json()
+
+     def chat(
+         self,
+         prompt: Union[str, Dict[str, str]]
+     ) -> str:
+         """Generate response `str`
+
+         Args:
+             prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
+
+         Returns:
+             str: Response generated
+         """
+         return self.get_message(self.ask(prompt))
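For orientation, here is a minimal usage sketch of the new synchronous provider, based only on the constructor and the ask/chat/get_message methods added above; the prompt text is a placeholder and nothing beyond this diff is assumed.

    # Illustrative sketch only; mirrors the DeepInfra class added in this diff.
    from webscout import DeepInfra

    bot = DeepInfra(
        max_tokens=600,                    # completion budget, as in the default signature
        timeout=30,
        model="Qwen/Qwen2-72B-Instruct",   # default model in this release
        system_prompt="You are a Helpful AI.",
    )
    # chat() calls ask() and extracts choices[0].message.content from the JSON reply.
    print(bot.chat("Summarize this package in one sentence."))

VLM follows the same ask/chat/get_message shape but accepts either a plain string or a dict with a 'content' key, and adds encode_image_to_base64 for preparing image payloads. The remaining hunks wire the new classes into the package: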
@@ -30,7 +30,7 @@ from .ChatGPTUK import ChatGPTUK
  from .Poe import POE
  from .BasedGPT import BasedGPT
  from .Deepseek import DeepSeek
-
+ from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
  __all__ = [
      'ThinkAnyAI',
      'Xjai',
@@ -62,5 +62,8 @@ __all__ = [
      'POE',
      'BasedGPT',
      'DeepSeek',
+     'DeepInfra',
+     'VLM',
+     'AsyncDeepInfra',

  ]
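Since the provider package now also re-exports AsyncDeepInfra, a short asynchronous sketch may help; it assumes the top-level webscout package exposes AsyncDeepInfra the same way webai.py imports DeepInfra, which this diff does not show explicitly.

    # Illustrative sketch only; AsyncDeepInfra wraps an httpx.AsyncClient internally.
    import asyncio
    from webscout import AsyncDeepInfra  # assumed top-level re-export

    async def main() -> None:
        bot = AsyncDeepInfra(model="meta-llama/Meta-Llama-3-70B-Instruct")
        # chat() awaits ask() and returns the extracted message text.
        reply = await bot.chat("Say hello in one short sentence.")
        print(reply)

    asyncio.run(main())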
webscout/__init__.py CHANGED
@@ -1,10 +1,11 @@
  from .webscout_search import WEBS
  from .webscout_search_async import AsyncWEBS
  from .version import __version__
- from .DWEBS import DeepWEBS
+ from .DWEBS import *
  from .transcriber import transcriber
  from .voice import play_audio
- # from .tempid import Client as TempMailClient, TemporaryPhoneNumber
+ from .websx_search import WEBSX
+
  from .LLM import LLM
  # from .Local import *
  import g4f
@@ -36,6 +37,7 @@ webai = [
      "poe",
      "basedgpt",
      "deepseek",
+     "deepinfra",
  ]

  gpt4free_providers = [
webscout/webai.py CHANGED
@@ -658,6 +658,21 @@ class Main(cmd.Cmd):
                  history_offset=history_offset,
                  act=awesome_prompt,
              )
+         elif provider == "deepinfra":
+             from webscout import DeepInfra
+
+             self.bot = DeepInfra(
+                 is_conversation=disable_conversation,
+                 max_tokens=max_tokens,
+                 timeout=timeout,
+                 intro=intro,
+                 filepath=filepath,
+                 update_file=update_file,
+                 proxies=proxies,
+                 model=getOr(model, "Qwen/Qwen2-72B-Instruct"),
+                 history_offset=history_offset,
+                 act=awesome_prompt,
+             )
          elif provider == "xjai":
              from webscout import Xjai