webscout 1.4.6__py3-none-any.whl → 2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -0,0 +1,518 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from Helpingai_T2 import Perplexity
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+
+ #------------------------------------------------------phind-------------------------------------------------------------
+ class PhindSearch:
+     # default_model = "Phind Model"
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 8000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Phind Model",
+         quiet: bool = False,
+     ):
+         """Instantiates PHIND
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 8000.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model name. Defaults to "Phind Model".
+             quiet (bool, optional): Ignore web search results and yield the final response only. Defaults to False.
+         """
+         self.session = requests.Session()
+         self.max_tokens_to_sample = max_tokens
+         self.is_conversation = is_conversation
+         self.chat_endpoint = "https://https.extension.phind.com/agent/"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.quiet = quiet
+
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": "",
+             "Accept": "*/*",
+             "Accept-Encoding": "Identity",
+         }
+
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
+             "object": "chat.completion.chunk",
+             "created": 1706775384,
+             "model": "trt-llm-phind-model-serving",
+             "choices": [
+                 {
+                     "index": 0,
+                     "delta": {
+                         "content": "Hello! How can I assist you with your programming today?"
+                     },
+                     "finish_reason": null
+                 }
+             ]
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "additional_extension_context": "",
+             "allow_magic_buttons": True,
+             "is_vscode_extension": True,
+             "message_history": [
+                 {"content": conversation_prompt, "metadata": {}, "role": "user"}
+             ],
+             "requested_model": self.model,
+             "user_input": prompt,
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if (
+                 not response.ok
+                 or response.headers.get("Content-Type")
+                 != "text/event-stream; charset=utf-8"
+             ):
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+             streaming_text = ""
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+             ):
+                 try:
+                     modified_value = re.sub("data:", "", value)
+                     json_modified_value = json.loads(modified_value)
+                     retrieved_text = self.get_message(json_modified_value)
+                     if not retrieved_text:
+                         continue
+                     streaming_text += retrieved_text
+                     json_modified_value["choices"][0]["delta"][
+                         "content"
+                     ] = streaming_text
+                     self.last_response.update(json_modified_value)
+                     yield value if raw else json_modified_value
+                 except json.decoder.JSONDecodeError:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         if response.get("type", "") == "metadata":
+             return
+
+         delta: dict = response["choices"][0]["delta"]
+
+         if not delta:
+             return ""
+
+         elif delta.get("function_call"):
+             if self.quiet:
+                 return ""
+
+             function_call: dict = delta["function_call"]
+             if function_call.get("name"):
+                 return function_call["name"]
+             elif function_call.get("arguments"):
+                 return function_call.get("arguments")
+
+         elif delta.get("metadata"):
+             if self.quiet:
+                 return ""
+             return yaml.dump(delta["metadata"])
+
+         else:
+             return (
+                 response["choices"][0]["delta"].get("content")
+                 if response["choices"][0].get("finish_reason") is None
+                 else ""
+             )
+ class AsyncPhindSearch(AsyncProvider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Phind Model",
+         quiet: bool = False,
+     ):
+         """Instantiates PHIND
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model name. Defaults to "Phind Model".
+             quiet (bool, optional): Ignore web search results and yield the final response only. Defaults to False.
+         """
+         self.max_tokens_to_sample = max_tokens
+         self.is_conversation = is_conversation
+         self.chat_endpoint = "https://https.extension.phind.com/agent/"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.quiet = quiet
+
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": "",
+             "Accept": "*/*",
+             "Accept-Encoding": "Identity",
+         }
+
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
+
+     async def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         synchronous_generator=False,
+     ) -> dict | AsyncGenerator:
+         """Asynchronously chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|AsyncGenerator : AI content.
+         ```json
+         {
+             "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
+             "object": "chat.completion.chunk",
+             "created": 1706775384,
+             "model": "trt-llm-phind-model-serving",
+             "choices": [
+                 {
+                     "index": 0,
+                     "delta": {
+                         "content": "Hello! How can I assist you with your programming today?"
+                     },
+                     "finish_reason": null
+                 }
+             ]
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "additional_extension_context": "",
+             "allow_magic_buttons": True,
+             "is_vscode_extension": True,
+             "message_history": [
+                 {"content": conversation_prompt, "metadata": {}, "role": "user"}
+             ],
+             "requested_model": self.model,
+             "user_input": prompt,
+         }
+
+         async def for_stream():
+             async with self.session.stream(
+                 "POST",
+                 self.chat_endpoint,
+                 json=payload,
+                 timeout=self.timeout,
+             ) as response:
+                 if (
+                     not response.is_success
+                     or response.headers.get("Content-Type")
+                     != "text/event-stream; charset=utf-8"
+                 ):
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
+                     )
+                 streaming_text = ""
+                 async for value in response.aiter_lines():
+                     try:
+                         modified_value = re.sub("data:", "", value)
+                         json_modified_value = json.loads(modified_value)
+                         retrieved_text = await self.get_message(json_modified_value)
+                         if not retrieved_text:
+                             continue
+                         streaming_text += retrieved_text
+                         json_modified_value["choices"][0]["delta"][
+                             "content"
+                         ] = streaming_text
+                         self.last_response.update(json_modified_value)
+                         yield value if raw else json_modified_value
+                     except json.decoder.JSONDecodeError:
+                         pass
+                 self.conversation.update_chat_history(
+                     prompt, await self.get_message(self.last_response)
+                 )
+
+         async def for_non_stream():
+             async for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return (
+             for_stream()
+             if stream and not synchronous_generator
+             else await for_non_stream()
+         )
+
+     async def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | AsyncGenerator:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str|AsyncGenerator: Response generated
+         """
+
+         async def for_stream():
+             ask_resp = await self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             )
+             async for response in ask_resp:
+                 yield await self.get_message(response)
+
+         async def for_non_stream():
+             return await self.get_message(
+                 await self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else await for_non_stream()
+
+     async def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         if response.get("type", "") == "metadata":
+             return
+
+         delta: dict = response["choices"][0]["delta"]
+
+         if not delta:
+             return ""
+
+         elif delta.get("function_call"):
+             if self.quiet:
+                 return ""
+
+             function_call: dict = delta["function_call"]
+             if function_call.get("name"):
+                 return function_call["name"]
+             elif function_call.get("arguments"):
+                 return function_call.get("arguments")
+
+         elif delta.get("metadata"):
+             if self.quiet:
+                 return ""
+             return yaml.dump(delta["metadata"])
+
+         else:
+             return (
+                 response["choices"][0]["delta"].get("content")
+                 if response["choices"][0].get("finish_reason") is None
+                 else ""
+             )
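
For orientation, the two providers added by this release share the same surface: `ask()` yields raw chunk dicts, `chat()` returns plain text (or a generator of it when `stream=True`), and `get_message()` extracts the text from a chunk. Below is a minimal usage sketch, not part of the diff: the import path `webscout.Provider.Phind` is an assumption (the diff does not show where the package re-exports these classes), and note that the streaming generators yield the accumulated response so far rather than per-chunk deltas, because each chunk's delta content is overwritten with the running `streaming_text`.

```python
# Minimal usage sketch (untested; assumptions flagged inline).
import asyncio

# Assumed import path - adjust to wherever this release actually exposes the classes.
from webscout.Provider.Phind import PhindSearch, AsyncPhindSearch


def sync_demo() -> None:
    bot = PhindSearch(timeout=30, quiet=True)

    # Non-streaming: chat() returns the final text in one piece.
    print(bot.chat("Explain Python decorators in one paragraph."))

    # Streaming: each yield is the accumulated text so far (not a delta),
    # so keep only the last value instead of concatenating chunks.
    latest = ""
    for latest in bot.chat("Give a one-line decorator example.", stream=True):
        pass
    print(latest)


async def async_demo() -> None:
    bot = AsyncPhindSearch(timeout=30, quiet=True)

    # Non-streaming: awaiting chat() returns the final text.
    print(await bot.chat("Explain Python decorators in one paragraph."))

    # Streaming: with stream=True, awaiting chat() resolves to an async generator.
    latest = ""
    async for latest in await bot.chat(
        "Give a one-line decorator example.", stream=True
    ):
        pass
    print(latest)


if __name__ == "__main__":
    sync_demo()
    asyncio.run(async_demo())
```

Both constructors accept the same conversation, history, and optimizer knobs documented in the docstrings above; nothing in this sketch is verified against a live Phind endpoint.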