webscout 1.4.5__py3-none-any.whl → 2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of webscout has been flagged as potentially problematic.

@@ -0,0 +1,402 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from Helpingai_T2 import Perplexity
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
+ class KOBOLDAI(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         top_p: float = 1,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiate KOBOLDAI
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+             timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.chat_endpoint = (
+             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
+         )
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "application/json",
+         }
+
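+         # Collect the public callables on Optimizers so ask() can validate optimizer names.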
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
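+         # An AwesomePrompts act, when given, takes precedence over the intro prompt.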
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : Response payload, for example:
+         ```json
+         {
+             "token" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
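+         # Request body for KoboldCpp's streaming generate endpoint.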
+         payload = {
+             "prompt": conversation_prompt,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
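+             # The endpoint streams server-sent events; splitting on the
+             # "event: message\ndata:" framing isolates each JSON payload, and
+             # message_load accumulates the text so every yielded dict carries
+             # the whole response generated so far.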
+             message_load = ""
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 delimiter="" if raw else "event: message\ndata:",
+                 chunk_size=self.stream_chunk_size,
+             ):
+                 try:
+                     resp = json.loads(value)
+                     message_load += self.get_message(resp)
+                     resp["token"] = message_load
+                     self.last_response.update(resp)
+                     yield value if raw else resp
+                 except json.decoder.JSONDecodeError:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             # Drain the stream generator so last_response ends up holding the full reply.
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("token")
+
+
+ class AsyncKOBOLDAI(AsyncProvider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         top_p: float = 1,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiate AsyncKOBOLDAI
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+             timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.chat_endpoint = (
+             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
+         )
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "application/json",
+         }
+
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
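+         # httpx.AsyncClient takes the place of requests.Session for async streaming.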
+         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
+
+     async def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict | AsyncGenerator:
+         """Chat with AI asynchronously.
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|AsyncGenerator : AI content
+         ```json
+         {
+             "token" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "prompt": conversation_prompt,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+
+         async def for_stream():
+             async with self.session.stream(
+                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
+             ) as response:
+                 if not response.is_success:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
+                     )
+
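+                 # sanitize_stream (from AIutel) is expected to strip the SSE
+                 # framing from each line and parse the JSON payload into a dict.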
+                 message_load = ""
+                 async for value in response.aiter_lines():
+                     try:
+                         resp = sanitize_stream(value)
+                         message_load += await self.get_message(resp)
+                         resp["token"] = message_load
+                         self.last_response.update(resp)
+                         yield value if raw else resp
+                     except json.decoder.JSONDecodeError:
+                         pass
+
+             self.conversation.update_chat_history(
+                 prompt, await self.get_message(self.last_response)
+             )
+
+         async def for_non_stream():
+             # Drain the stream generator so last_response ends up holding the full reply.
+             async for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else await for_non_stream()
+
+     async def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | AsyncGenerator:
+         """Generate response `str` asynchronously.
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         async def for_stream():
+             async_ask = await self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             )
+             async for response in async_ask:
+                 yield await self.get_message(response)
+
+         async def for_non_stream():
+             return await self.get_message(
+                 await self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else await for_non_stream()
+
+     async def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("token")
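For orientation, a minimal usage sketch of the two providers added above. The import path is an assumption — the diff only shows the module body, not where these classes are exported from.

```python
import asyncio

# Hypothetical import path; adjust to wherever webscout exports these classes.
from webscout import KOBOLDAI, AsyncKOBOLDAI

# Synchronous, non-streaming: chat() drains the stream and returns the full text.
bot = KOBOLDAI(timeout=60)
print(bot.chat("Hello there"))

# Synchronous, streaming: each chunk carries the whole response generated so far.
for text in bot.chat("Tell me a joke", stream=True):
    print(text)

# Asynchronous variant.
async def main():
    abot = AsyncKOBOLDAI(timeout=60)
    print(await abot.chat("Hello there"))

asyncio.run(main())
```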