webscout 1.3.9-py3-none-any.whl → 1.4.1-py3-none-any.whl
This diff shows the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AI.py +228 -1
- webscout/__init__.py +1 -1
- webscout/async_providers.py +32 -32
- webscout/tempid.py +157 -0
- webscout/version.py +1 -1
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/METADATA +281 -62
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/RECORD +11 -10
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/LICENSE.md +0 -0
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/WHEEL +0 -0
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/entry_points.txt +0 -0
- {webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/top_level.txt +0 -0
webscout/AI.py
CHANGED
@@ -64,6 +64,7 @@ class LLAMA2(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.model = model
@@ -4158,7 +4159,7 @@ class YEPCHAT(Provider):
         presence_penalty: int = 0,
         frequency_penalty: int = 0,
         top_p: float = 0.7,
-        model: str ="Mixtral-8x7B-Instruct-v0.1",
+        model: str = "Mixtral-8x7B-Instruct-v0.1",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -4185,6 +4186,7 @@ class YEPCHAT(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.model = model
@@ -4371,6 +4373,231 @@ class YEPCHAT(Provider):
             return response["choices"][0]["message"]["content"]
         except KeyError:
             return ""
+
+
+class AsyncYEPCHAT(AsyncProvider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 0.6,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 0.7,
+        model: str = "Mixtral-8x7B-Instruct-v0.1",
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates YEPCHAT
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6.
+            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
+            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
+            model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.model = model
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json; charset=utf-8",
+            "Origin": "https://yep.com",
+            "Referer": "https://yep.com/",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = httpx.AsyncClient(
+            headers=self.headers,
+            proxies=proxies,
+        )
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI asynchronously.
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
+            "object": "chat.completion.chunk",
+            "created": 1713876886,
+            "model": "Mixtral-8x7B-Instruct-v0.1",
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "role": null,
+                        "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
+                    },
+                    "finish_reason": null
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        payload = {
+            "stream": True,
+            "max_tokens": 1280,
+            "top_p": self.top_p,
+            "temperature": self.temperature,
+            "messages": [{"content": conversation_prompt, "role": "user"}],
+            "model": self.model,
+        }
+
+        async def for_stream():
+            async with self.session.stream(
+                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
+            ) as response:
+                if not response.is_success:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
+                    )
+
+                message_load = ""
+                async for value in response.aiter_lines():
+                    try:
+                        resp = sanitize_stream(value)
+                        incomplete_message = await self.get_message(resp)
+                        if incomplete_message:
+                            message_load += incomplete_message
+                            resp["choices"][0]["delta"]["content"] = message_load
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                        elif raw:
+                            yield value
+                    except json.decoder.JSONDecodeError:
+                        pass
+
+            self.conversation.update_chat_history(
+                prompt, await self.get_message(self.last_response)
+            )
+
+        async def for_non_stream():
+            async for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            )
+
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            return await self.get_message(
+                await self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        try:
+            if response["choices"][0].get("delta"):
+                return response["choices"][0]["delta"]["content"]
+            return response["choices"][0]["message"]["content"]
+        except KeyError:
+            return ""
 class AsyncYEPCHAT(AsyncProvider):
     def __init__(
         self,
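The substance of this change is the new `AsyncYEPCHAT` provider. Note that the trailing context lines show an identical `AsyncYEPCHAT` definition already present at old line 4374, so 1.4.1 appears to define the class twice (the later definition wins at import time). A minimal usage sketch, assuming the class is exported from `webscout.AI` the way `async_providers.py` below imports it:

```python
import asyncio
from webscout.AI import AsyncYEPCHAT  # new in 1.4.1

async def main():
    bot = AsyncYEPCHAT()
    # Non-streaming: chat() resolves to the full response string
    print(await bot.chat("What is the capital of France?"))
    # Streaming: chat(stream=True) returns an async generator of growing chunks
    async for chunk in await bot.chat("Tell me a joke", stream=True):
        print(chunk)

asyncio.run(main())
```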
webscout/__init__.py
CHANGED
webscout/async_providers.py
CHANGED
@@ -1,33 +1,33 @@
-from webscout.AI import AsyncPhindSearch
-from webscout.AI import AsyncYEPCHAT
-from webscout.AI import AsyncOPENGPT
-from webscout.AI import AsyncOPENAI
-from webscout.AI import AsyncLLAMA2
-from webscout.AI import AsyncLEO
-from webscout.AI import AsyncKOBOLDAI
-from webscout.AI import AsyncGROQ
-from webscout.AI import AsyncBLACKBOXAI
-from webscout.AI import AsyncGPT4FREE
-
-mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    "blackboxai": AsyncBLACKBOXAI,
-    "gpt4free": AsyncGPT4FREE,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
-    "leo": AsyncLEO,
-    "groq": AsyncGROQ,
-    "openai": AsyncOPENAI,
-}
-
-tgpt_mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    # "gpt4free": AsyncGPT4FREE,
-    "blackboxai": AsyncBLACKBOXAI,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
+from webscout.AI import AsyncPhindSearch
+from webscout.AI import AsyncYEPCHAT
+from webscout.AI import AsyncOPENGPT
+from webscout.AI import AsyncOPENAI
+from webscout.AI import AsyncLLAMA2
+from webscout.AI import AsyncLEO
+from webscout.AI import AsyncKOBOLDAI
+from webscout.AI import AsyncGROQ
+from webscout.AI import AsyncBLACKBOXAI
+from webscout.AI import AsyncGPT4FREE
+
+mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    "blackboxai": AsyncBLACKBOXAI,
+    "gpt4free": AsyncGPT4FREE,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+    "leo": AsyncLEO,
+    "groq": AsyncGROQ,
+    "openai": AsyncOPENAI,
+}
+
+tgpt_mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    # "gpt4free": AsyncGPT4FREE,
+    "blackboxai": AsyncBLACKBOXAI,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
 }
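Every line of this module is removed and re-added with identical text, which points to a line-ending or encoding normalization rather than a functional change. The mappers are lookup tables from provider names to the async classes; a minimal sketch, assuming the default constructors need no arguments:

```python
import asyncio
from webscout.async_providers import mapper

async def main():
    provider_cls = mapper["yepchat"]  # resolves to AsyncYEPCHAT
    bot = provider_cls()
    print(await bot.chat("Hello"))

asyncio.run(main())
```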
webscout/tempid.py
ADDED
@@ -0,0 +1,157 @@
+import aiohttp
+from dataclasses import dataclass
+from bs4 import BeautifulSoup
+import tls_client
+import random
+
+
+@dataclass
+class DomainModel:
+    name: str
+    type: str
+    forward_available: str
+    forward_max_seconds: str
+
+
+@dataclass
+class CreateEmailResponseModel:
+    email: str
+    token: str
+
+
+@dataclass
+class MessageResponseModel:
+    attachments: list | None
+    body_html: str | None
+    body_text: str | None
+    cc: str | None
+    created_at: str
+    email_from: str | None
+    id: str
+    subject: str | None
+    email_to: str | None
+
+
+class Client:
+    def __init__(self):
+        self._session = aiohttp.ClientSession(
+            base_url="https://api.internal.temp-mail.io",
+            headers={
+                'Host': 'api.internal.temp-mail.io',
+                'User-Agent': 'okhttp/4.5.0',
+                'Connection': 'close'
+            }
+        )
+
+    async def close(self) -> None:
+        if not self._session.closed:
+            await self._session.close()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self) -> None:
+        await self.close()
+        return None
+
+    async def get_domains(self) -> list[DomainModel]:
+        async with self._session.get("/api/v3/domains") as response:
+            response_json = await response.json()
+            return [DomainModel(domain['name'], domain['type'], domain['forward_available'], domain['forward_max_seconds']) for domain in response_json['domains']]
+
+    async def create_email(self, alias: str | None = None, domain: str | None = None) -> CreateEmailResponseModel:
+        async with self._session.post("/api/v3/email/new", data={'name': alias, 'domain': domain}) as response:
+            response_json = await response.json()
+            return CreateEmailResponseModel(response_json['email'], response_json['token'])
+
+    async def delete_email(self, email: str, token: str) -> bool:
+        async with self._session.delete(f"/api/v3/email/{email}", data={'token': token}) as response:
+            if response.status == 200:
+                return True
+            else:
+                return False
+
+    async def get_messages(self, email: str) -> list[MessageResponseModel] | None:
+        async with self._session.get(f"/api/v3/email/{email}/messages") as response:
+            response_json = await response.json()
+            if len(response_json) == 0:
+                return None
+            return [MessageResponseModel(message['attachments'], message['body_html'], message['body_text'], message['cc'], message['created_at'], message['from'], message['id'], message['subject'], message['to']) for message in response_json]
+
+
+class TemporaryPhoneNumber:
+    def __init__(self):
+        self.maxpages = {"UK": 59, "US": 3, "France": 73, "Netherlands": 60, "Finland": 47}
+        self.minpages = {"UK": 20, "US": 1, "France": 20, "Netherlands": 20, "Finland": 20}
+        self.plist = {"UK": "+44", "US": "+1", "France": "+33", "Netherlands": "+31", "Finland": "+358"}
+        self.countries = {"44": "UK", "1": "US", "33": "France", "31": "Netherlands", "358": "Finland"}
+
+    def get_number(self, country="UK"):
+        if country == "Random":
+            country = random.choice(list(self.countries.values()))
+        if country not in self.countries.values():
+            raise ValueError("Unsupported Country")
+
+        session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
+        maxpage = self.maxpages[country]
+        minpage = self.minpages[country]
+        page = random.randint(minpage, maxpage)
+
+        if page == 1:
+            res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number")
+        else:
+            res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/page{page}")
+
+        soup = BeautifulSoup(res.content, "lxml")
+        numbers = []
+        p = self.plist[country]
+        for a in soup.find_all("a"):
+            a = a.get("title", "none")
+            if f"{country} Phone Number {p}" in a:
+                a = a.replace(f"{country} Phone Number ", "").replace(" ", "")
+                numbers.append(a)
+        return random.choice(numbers)
+
+    def get_messages(self, number: str):
+        number = number.replace("+", "")
+        try:
+            i = int(number)
+        except:
+            raise ValueError("Wrong Number")
+
+        country = None
+        for key, value in self.countries.items():
+            if number.startswith(key):
+                country = value
+
+        if country == None:
+            raise ValueError("Unsupported Country")
+
+        session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
+        res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/{number}")
+
+        if res.status_code == 404:
+            raise ValueError("Number doesn't exist")
+
+        soup = BeautifulSoup(res.content, "lxml")
+        messages = []
+        message = {"content": None, "frm": "", "time": ""}
+
+        for div in soup.find_all("div"):
+            divclass = div.get("class", "None")[0]
+            if divclass == "direct-chat-info":
+                message["frm"] = div.text.split("\n")[1].replace("From ", "")
+                message["time"] = div.text.split("\n")[2]
+            if divclass == "direct-chat-text":
+                message["content"] = div.text
+                messages.append(sms_message(content=message["content"], frm=message["frm"], time=message["time"]))
+                message = {"content": None, "frm": "", "time": ""}
+
+        return messages
+
+
+class sms_message:
+    def __init__(self, content, frm, time):
+        self.content = content
+        self.frm = frm
+        self.time = time
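A short end-to-end sketch of the new `Client`, assuming the temp-mail.io endpoints respond in the shape the dataclasses above expect. Explicit `close()` is used instead of `async with`, since `__aexit__` as defined takes none of the `(exc_type, exc, tb)` arguments the context-manager protocol passes, so `async with Client() as c:` would raise a `TypeError` on exit:

```python
import asyncio
from webscout.tempid import Client

async def main():
    client = Client()  # must be created inside a running event loop
    try:
        domains = await client.get_domains()
        email = await client.create_email(domain=domains[0].name)
        print("address:", email.email)
        # get_messages returns None while the mailbox is still empty
        print("messages:", await client.get_messages(email.email))
        print("deleted:", await client.delete_email(email.email, email.token))
    finally:
        await client.close()

asyncio.run(main())
```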
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.3.9"
+__version__ = "1.4.1"
 
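A one-line check that a local install matches this release; `__version__` comes straight from the file above:

```python
from webscout.version import __version__

assert __version__ == "1.4.1", f"unexpected version: {__version__}"
```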
{webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.3.9
+Version: 1.4.1
 Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -47,7 +47,8 @@ Requires-Dist: tiktoken
 Requires-Dist: tldextract
 Requires-Dist: orjson
 Requires-Dist: PyYAML
-Requires-Dist:
+Requires-Dist: appdirs
+Requires-Dist: GoogleBard1 >=2.1.4
 Provides-Extra: dev
 Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
 Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -58,7 +59,7 @@ Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
 <a href="#"><img alt="Python version" src="https://img.shields.io/pypi/pyversions/webscout"/></a>
 <a href="https://pepy.tech/project/webscout"><img alt="Downloads" src="https://static.pepy.tech/badge/webscout"></a>
 
-Search for anything using the Google, DuckDuckGo
+Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 
 
 ## Table of Contents
@@ -68,6 +69,9 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [CLI version](#cli-version)
 - [CLI to use LLM](#cli-to-use-llm)
 - [Regions](#regions)
+- [Tempmail and Temp number](#tempmail-and-temp-number)
+- [Temp number](#temp-number)
+- [Tempmail](#tempmail)
 - [Transcriber](#transcriber)
 - [DeepWEBS: Advanced Web Searches](#deepwebs-advanced-web-searches)
 - [Activating DeepWEBS](#activating-deepwebs)
@@ -90,7 +94,7 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [usage of webscout.AI](#usage-of-webscoutai)
 - [1. `PhindSearch` - Search using Phind.com](#1-phindsearch---search-using-phindcom)
 - [2. `YepChat` - Chat with mistral 8x7b powered by yepchat](#2-yepchat---chat-with-mistral-8x7b-powered-by-yepchat)
-- [3. `You.com` - search with you.com](#3-youcom---search-with-youcom)
+- [3. `You.com` - search with you.com -NOT WORKING](#3-youcom---search-with-youcom--not-working)
 - [4. `Gemini` - search with google gemini](#4-gemini---search-with-google-gemini)
 - [usage of image generator from Webscout.AI](#usage-of-image-generator-from-webscoutai)
 - [5. `Prodia` - make image using prodia](#5-prodia---make-image-using-prodia)
@@ -102,6 +106,7 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
 - [`LLM`](#llm)
 - [`LLM` with internet](#llm-with-internet)
+- [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
 
 ## Install
@@ -211,7 +216,91 @@ ___
 
 [Go To TOP](#TOP)
 
+## Tempmail and Temp number
 
+### Temp number
+```python
+from rich.console import Console
+from webscout import tempid
+
+def main():
+    console = Console()
+    phone = tempid.TemporaryPhoneNumber()
+
+    try:
+        # Get a temporary phone number for a specific country (or random)
+        number = phone.get_number(country="Finland")
+        console.print(f"Your temporary phone number: [bold cyan]{number}[/bold cyan]")
+
+        # Pause execution briefly (replace with your actual logic)
+        # import time module
+        import time
+        time.sleep(30)  # Adjust the waiting time as needed
+
+        # Retrieve and print messages
+        messages = phone.get_messages(number)
+        if messages:
+            # Access individual messages using indexing:
+            console.print(f"[bold green]{messages[0].frm}:[/] {messages[0].content}")
+            # (Add more lines if you expect multiple messages)
+        else:
+            console.print("No messages received.")
+
+    except Exception as e:
+        console.print(f"[bold red]An error occurred: {e}")
+
+if __name__ == "__main__":
+    main()
+
+```
+### Tempmail
+```python
+import asyncio
+from rich.console import Console
+from rich.table import Table
+from rich.text import Text
+from webscout import tempid
+
+async def main() -> None:
+    console = Console()
+    client = tempid.Client()
+
+    try:
+        domains = await client.get_domains()
+        if not domains:
+            console.print("[bold red]No domains available. Please try again later.")
+            return
+
+        email = await client.create_email(domain=domains[0].name)
+        console.print(f"Your temporary email: [bold cyan]{email.email}[/bold cyan]")
+        console.print(f"Token for accessing the email: [bold cyan]{email.token}[/bold cyan]")
+
+        while True:
+            messages = await client.get_messages(email.email)
+            if messages is not None:
+                break
+
+        if messages:
+            table = Table(show_header=True, header_style="bold magenta")
+            table.add_column("From", style="bold cyan")
+            table.add_column("Subject", style="bold yellow")
+            table.add_column("Body", style="bold green")
+            for message in messages:
+                body_preview = Text(message.body_text if message.body_text else "No body")
+                table.add_row(message.email_from or "Unknown", message.subject or "No Subject", body_preview)
+            console.print(table)
+        else:
+            console.print("No messages found.")
+
+    except Exception as e:
+        console.print(f"[bold red]An error occurred: {e}")
+
+    finally:
+        await client.close()
+
+if __name__ == '__main__':
+    asyncio.run(main())
+```
 ## Transcriber
 The transcriber function in webscout is a handy tool that transcribes YouTube videos. Here's an example code demonstrating its usage:
 ```python
@@ -483,19 +572,47 @@ with WEBS() as WEBS:
 
 ```python
 from webscout import WEBS
+import datetime
+
+def fetch_news(keywords, timelimit):
+    news_list = []
+    with WEBS() as webs_instance:
+        WEBS_news_gen = webs_instance.news(
+            keywords,
+            region="wt-wt",
+            safesearch="off",
+            timelimit=timelimit,
+            max_results=20
+        )
+        for r in WEBS_news_gen:
+            # Convert the date to a human-readable format using datetime
+            r['date'] = datetime.datetime.fromisoformat(r['date']).strftime('%B %d, %Y')
+            news_list.append(r)
+    return news_list
+
+def _format_headlines(news_list, max_headlines: int = 100):
+    headlines = []
+    for idx, news_item in enumerate(news_list):
+        if idx >= max_headlines:
+            break
+        new_headline = f"{idx + 1}. {news_item['title'].strip()} "
+        new_headline += f"(URL: {news_item['url'].strip()}) "
+        new_headline += f"{news_item['body'].strip()}"
+        new_headline += "\n"
+        headlines.append(new_headline)
+
+    headlines = "\n".join(headlines)
+    return headlines
+
+# Example usage
+keywords = 'latest AI news'
+timelimit = 'd'
+news_list = fetch_news(keywords, timelimit)
+
+# Format and print the headlines
+formatted_headlines = _format_headlines(news_list)
+print(formatted_headlines)
 
-# News search for the keyword 'holiday' using DuckDuckGo.com and yep.com
-with WEBS() as WEBS:
-    keywords = 'holiday'
-    WEBS_news_gen = WEBS.news(
-        keywords,
-        region="wt-wt",
-        safesearch="off",
-        timelimit="m",
-        max_results=20
-    )
-    for r in WEBS_news_gen:
-        print(r)
 ```
 
 ### 6. `maps()` - map search by DuckDuckGo.com and
@@ -552,26 +669,22 @@ message = ph.get_message(response)
 print(message)
 ```
 ### 2. `YepChat` - Chat with mistral 8x7b powered by yepchat
-Thanks To Divyansh Shukla for This code
 ```python
-from webscout.AI import
+from webscout.AI import YEPCHAT
 
-
-
-
-
-
-
-
-
-    print(processed_response)
+# Instantiate the YEPCHAT class with default parameters
+YEPCHAT = YEPCHAT()
+
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'cha' method to get a response from the AI
+r = YEPCHAT.chat(prompt)
+print(r)
 
-if __name__ == "__main__":
-    main()
 ```
 
-### 3. `You.com` - search with you.com
+### 3. `You.com` - search with you.com -NOT WORKING
 ```python
 from webscout.AI import youChat
 
@@ -597,15 +710,34 @@ while True:
 ### 4. `Gemini` - search with google gemini
 
 ```python
-
+import webscout
+from webscout.AI import GEMINI
 
-#
-
+# Replace with the path to your bard.google.com.cookies.json file
+COOKIE_FILE = "path/to/bard.google.com.cookies.json"
 
-#
-
+# Optional: Provide proxy details if needed
+PROXIES = {
+    "http": "http://proxy_server:port",
+    "https": "https://proxy_server:port",
+}
 
-#
+# Initialize GEMINI with cookie file and optional proxies
+gemini = GEMINI(cookie_file=COOKIE_FILE, proxy=PROXIES)
+
+# Ask a question and print the response
+response = gemini.chat("What is the meaning of life?")
+print(response)
+
+# Ask another question, this time streaming the response
+for chunk in gemini.chat("Tell me a story", stream=True):
+    print(chunk, end="")
+
+# Reset the conversation to start a new interaction
+gemini.reset()
+
+# Ask a question with the code optimizer
+response = gemini.chat("Write Python code to print 'Hello, world!'", optimizer="code")
 print(response)
 ```
 ## usage of image generator from Webscout.AI
@@ -637,17 +769,18 @@ ai = BLACKBOXAI(
     model=None # You can specify a model if needed
 )
 
-#
-
-
-
-
-
-
-
-
-#
-
+# Start an infinite loop for continuous interaction
+while True:
+    # Define a prompt to send to the AI
+    prompt = input("Enter your prompt: ")
+
+    # Check if the user wants to exit the loop
+    if prompt.lower() == "exit":
+        break
+
+    # Use the 'chat' method to send the prompt and receive a response
+    r = ai.chat(prompt)
+    print(r)
 ```
 ### 7. `PERPLEXITY` - Search With PERPLEXITY
 ```python
@@ -665,10 +798,12 @@ print(response)
 from webscout.AI import OPENGPT
 
 opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30)
-
-
-
-print
+while True:
+    # Prompt the user for input
+    prompt = input("Enter your prompt: ")
+    # Send the prompt to the OPENGPT model and print the response
+    response_str = opengpt.chat(prompt)
+    print(response_str)
 ```
 ### 9. `KOBOLDIA` -
 ```python
@@ -711,7 +846,7 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-### `LLM` 
+### `LLM`
 ```python
 from webscout.LLM import LLM
 
@@ -742,23 +877,19 @@ while True:
 from __future__ import annotations
 from typing import List, Optional
 
-from webscout import LLM
+from webscout.LLM import LLM
 from webscout import WEBS
 import warnings
 
 system_message: str = (
-    "As AI,
-    "
-    "
-    "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
-    "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
-    "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
-    "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
+    "As an AI assistant, I have been designed with advanced capabilities, including real-time access to online resources. This enables me to enrich our conversations and provide you with informed and accurate responses, drawing from a vast array of information. With each interaction, my goal is to create a seamless and meaningful connection, offering insights and sharing relevant content."
+    "My directives emphasize the importance of respect, impartiality, and intellectual integrity. I am here to provide unbiased responses, ensuring an ethical and respectful exchange. I will respect your privacy and refrain from sharing any personal information that may be obtained during our conversations or through web searches, only utilizing web search functionality when necessary to provide the most accurate and up-to-date information."
+    "Together, let's explore a diverse range of topics, creating an enjoyable and informative experience, all while maintaining the highest standards of privacy and respect"
 )
 
 # Ignore the specific UserWarning
 warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
-LLM = LLM(model="
+LLM = LLM(model="mistralai/Mixtral-8x22B-Instruct-v0.1", system_message=system_message)
 
 
 def chat(
@@ -814,6 +945,94 @@ if __name__ == "__main__":
     else:
         print("No response")
 ```
+### LLM with deepwebs
+```python
+from __future__ import annotations
+from typing import List, Optional
+from webscout.LLM import LLM
+from webscout import DeepWEBS
+import warnings
+
+system_message: str = (
+    "As an AI assistant, I have been designed with advanced capabilities, including real-time access to online resources. This enables me to enrich our conversations and provide you with informed and accurate responses, drawing from a vast array of information. With each interaction, my goal is to create a seamless and meaningful connection, offering insights and sharing relevant content."
+    "My directives emphasize the importance of respect, impartiality, and intellectual integrity. I am here to provide unbiased responses, ensuring an ethical and respectful exchange. I will respect your privacy and refrain from sharing any personal information that may be obtained during our conversations or through web searches, only utilizing web search functionality when necessary to provide the most accurate and up-to-date information."
+    "Together, let's explore a diverse range of topics, creating an enjoyable and informative experience, all while maintaining the highest standards of privacy and respect"
+)
+
+# Ignore the specific UserWarning
+warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
+
+LLM = LLM(model="mistralai/Mixtral-8x22B-Instruct-v0.1", system_message=system_message)
+
+def perform_web_search(query):
+    # Initialize the DeepWEBS class
+    D = DeepWEBS()
+
+    # Set up the search parameters
+    search_params = D.DeepSearch(
+        queries=[query],  # Query to search
+        result_num=10,  # Number of search results
+        safe=True,  # Enable SafeSearch
+        types=["web"],  # Search type: web
+        extract_webpage=True,  # True for extracting webpages
+        overwrite_query_html=True,
+        overwrite_webpage_html=True,
+    )
+
+    # Execute the search and retrieve results
+    results = D.queries_to_search_results(search_params)
+    return results
+
+def chat(user_input: str, result_num: int = 10) -> Optional[str]:
+    """
+    Chat function to perform a web search based on the user input and generate a response using the LLM model.
+
+    Parameters
+    ----------
+    user_input : str
+        The user input to be used for the web search
+    max_results : int, optional
+        The maximum number of search results to include in the response, by default 10
+
+    Returns
+    -------
+    Optional[str]
+        The response generated by the LLM model, or None if there is no response
+    """
+    # Perform a web search based on the user input
+    search_results = perform_web_search(user_input)
+
+    # Extract URLs from search results
+    url_results = []
+    for result in search_results[0]['query_results']:
+        url_results.append(f"{result['title']} ({result['site']}): {result['url']}")
+
+    # Format search results
+    formatted_results = "\n".join(url_results)
+
+    # Define the messages to be sent, including the user input, search results, and system message
+    messages = [
+        {"role": "user", "content": f"User question is:\n{user_input}\nwebsearch results are:\n{formatted_results}"},
+    ]
+
+    # Use the chat method to get the response
+    response = LLM.chat(messages)
+    return response
+
+if __name__ == "__main__":
+    while True:
+        # Get the user input
+        user_input = input("User: ")
+
+        # Perform a web search based on the user input
+        response = chat(user_input)
+
+        # Print the response
+        if response:
+            print("AI:", response)
+        else:
+            print("No response")
+```
 ## `Webai` - terminal gpt and a open interpeter
 
 ```python
{webscout-1.3.9.dist-info → webscout-1.4.1.dist-info}/RECORD
CHANGED

@@ -10,28 +10,29 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=vRB9T3o-nMgrMkG2NPHTDctNeXaPSKCmBXqu
 DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,2472
 DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
-webscout/AI.py,sha256=
+webscout/AI.py,sha256=9wtuXzOEWwZea_U4AUGj76WrBKHa_g8oEoiQqw-7W50,211764
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
 webscout/AIutel.py,sha256=nGzO4T6b7YuxOQigtjNsUBESmDKlk3_CvbIfDdd2KKo,33135
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=BKWAoz_1lX-ZoDOnnKWMGxUULC491gfLEpBcj6eHHkA,1067
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
-webscout/async_providers.py,sha256=
+webscout/async_providers.py,sha256=pPoSdfB_4SlOYcpAtkKIyDtl7sZ9DGgWy5aIBOjBO9Q,971
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
 webscout/exceptions.py,sha256=e4hJnOEAiYuA6BTsMgv4R-vOq0Tt3f9ba0ROTNtPDl4,378
 webscout/g4f.py,sha256=Npxf7YI0eFMxizD9VOI5cE0h4YTbHqgW2WzxVtv2jno,24451
 webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
+webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
-webscout/version.py,sha256=
+webscout/version.py,sha256=wHltjxU1-zVn-DIx35MXt6hNYDhTgTUvsDnuaZQp1-w,25
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
 webscout/webai.py,sha256=FQQlTmTsl3V__7V9_jyG-CaggSaDgBr_8XeJOaMXITE,81661
 webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
 webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
-webscout-1.
-webscout-1.
-webscout-1.
-webscout-1.
-webscout-1.
-webscout-1.
+webscout-1.4.1.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-1.4.1.dist-info/METADATA,sha256=VTQw_MG0z_psk5KpC-hKsYwyap1NXSBql90wvbliQjw,40478
+webscout-1.4.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-1.4.1.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-1.4.1.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-1.4.1.dist-info/RECORD,,
The remaining dist-info files (LICENSE.md, WHEEL, entry_points.txt, top_level.txt) move to the webscout-1.4.1.dist-info directory without changes.
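The RECORD hashes above use the standard wheel format: the URL-safe base64 encoding of the file's SHA-256 digest with `=` padding stripped. A sketch for spot-checking an entry against a local copy of the unpacked wheel (the path is illustrative):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Recompute a RECORD-style sha256 entry for a file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Expect 5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4 per the RECORD above
print(record_hash("webscout/tempid.py"))
```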
|