webscout 4.2-py3-none-any.whl → 4.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic (see the registry page for details).
- webscout/AIauto.py +5 -7
- webscout/AIutel.py +6 -8
- webscout/Extra/autollama.py +20 -9
- webscout/Extra/gguf.py +5 -19
- webscout/Extra/weather.py +27 -9
- webscout/Extra/weather_ascii.py +5 -0
- webscout/GoogleS.py +342 -0
- webscout/Local/_version.py +1 -1
- webscout/Provider/FreeGemini.py +169 -0
- webscout/Provider/Llama.py +211 -0
- webscout/Provider/__init__.py +5 -5
- webscout/__init__.py +7 -4
- webscout/async_providers.py +0 -2
- webscout/cli.py +22 -21
- webscout/utils.py +13 -3
- webscout/version.py +1 -1
- webscout/webai.py +3 -0
- webscout/webscout_search.py +34 -26
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/METADATA +35 -1
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/RECORD +24 -22
- webscout/Provider/Llama2.py +0 -437
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/LICENSE.md +0 -0
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/WHEEL +0 -0
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/entry_points.txt +0 -0
- {webscout-4.2.dist-info → webscout-4.4.dist-info}/top_level.txt +0 -0
webscout/Provider/FreeGemini.py
ADDED
@@ -0,0 +1,169 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class FreeGemini(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates FreeGemini
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://api.safone.dev/bard"
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.headers = {
+            "accept": "application/json",
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {"message": conversation_prompt}
+
+        response = self.session.post(
+            self.chat_endpoint, json=payload, timeout=self.timeout
+        )
+
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        message_load = self.get_message(resp)
+        self.conversation.update_chat_history(
+            prompt, message_load
+        )
+        return resp
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]
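FreeGemini follows webscout's common Provider contract: `ask()` returns the raw response dict and `chat()` returns only the extracted text. A minimal usage sketch (an assumption-laden example, not part of the diff: it presumes webscout 4.4 is installed and the api.safone.dev endpoint it wraps is reachable):

```python
# Minimal sketch of driving the new FreeGemini provider.
# Assumes webscout>=4.4 and network access to https://api.safone.dev/bard.
from webscout.Provider import FreeGemini

bot = FreeGemini(timeout=60)

# chat() returns just the reply text...
print(bot.chat("What is the capital of France?"))

# ...while ask() returns the full response dict, e.g. {"message": "..."}.
resp = bot.ask("Summarize HTTP caching in one sentence.")
print(resp["message"])
```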
webscout/Provider/Llama.py
ADDED
@@ -0,0 +1,211 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class LLAMA(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates LLAMA
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): LLM model name. Defaults to "llama3-70b-8192".
+        """
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = "llama3-70b-8192",
+        self.api_endpoint = "https://api.safone.dev/llama"
+        self.headers = {
+            "accept": "application/json",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = requests.Session()
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+        ```json
+        {
+            "text" : "print('How may I help you today?')"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "message": conversation_prompt
+        }
+
+        def for_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for chunk in response.iter_lines():
+                try:
+                    resp = json.loads(chunk)
+                    message_load += resp['message']
+                    yield chunk if raw else dict(text=message_load)
+                    self.last_response.update(resp)
+                except:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=False, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)

+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]
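LLAMA exposes the same interface but adds a streaming path: with `stream=True`, `ask()` yields dicts whose `text` key holds the reply accumulated so far. Note also that `__init__` assigns `self.model` with a trailing comma, so the attribute is actually a one-element tuple; since `ask()` never sends it, behavior is unaffected. A usage sketch (hypothetical, assuming webscout 4.4 and a reachable endpoint):

```python
# Sketch of the new LLAMA provider's streaming and non-streaming paths.
# Assumes webscout>=4.4 and network access to https://api.safone.dev/llama.
from webscout.Provider import LLAMA

ai = LLAMA(timeout=30)

# Non-streaming: chat() returns the complete reply as a string.
print(ai.chat("Write a haiku about the sea."))

# Streaming: ask(stream=True) yields dicts with the text accumulated so far.
for part in ai.ask("Count from 1 to 5.", stream=True):
    print(part["text"], end="\r")
print()
```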
webscout/Provider/__init__.py
CHANGED
@@ -2,8 +2,7 @@
 
 from .ThinkAnyAI import ThinkAnyAI
 from .Xjai import Xjai
-from .
-from .Llama2 import AsyncLLAMA2
+from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
 from .Groq import GROQ
@@ -38,11 +37,11 @@ from .VTLchat import VTLchat
 from .Geminipro import GEMINIPRO
 from .Geminiflash import GEMINIFLASH
 from .OLLAMA import OLLAMA
+from .FreeGemini import FreeGemini
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
-    '
-    'AsyncLLAMA2',
+    'LLAMA',
     'Cohere',
     'REKA',
     'GROQ',
@@ -78,7 +77,8 @@ __all__ = [
     'OPENGPTv2',
     'GEMINIPRO',
     'GEMINIFLASH',
-    'OLLAMA'
+    'OLLAMA',
+    'FreeGemini'
 
 
 ]
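The net effect on imports: `Llama2`'s exports (including `AsyncLLAMA2`) are gone, and `LLAMA` plus `FreeGemini` take their place. A hypothetical migration sketch for downstream code:

```python
# Hypothetical migration for code that imported the removed Llama2 provider.
# Before (webscout 4.2) -- no longer available in 4.4:
#   from webscout.Provider import AsyncLLAMA2
# After (webscout 4.4):
from webscout.Provider import LLAMA, FreeGemini
```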
webscout/__init__.py
CHANGED
@@ -5,16 +5,16 @@ from .DWEBS import *
 from .transcriber import transcriber
 from .voice import play_audio
 from .websx_search import WEBSX
-
-from .LLM import LLM
+from .LLM import VLM, LLM
 from .YTdownloader import *
-
+from .GoogleS import *
 import g4f
 from .YTdownloader import *
 from .Provider import *
 from .Extra import gguf
 from .Extra import autollama
 from .Extra import weather_ascii, weather
+
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
 webai = [
@@ -58,12 +58,15 @@ __all__ = [
     "WEBS",
     "AsyncWEBS",
     "__version__",
-    "
+    "DWEBS",
     "transcriber",
     "play_audio",
     "TempMailClient",
     "TemporaryPhoneNumber",
     "LLM",
+    "YTdownloader",
+    "WEBSX",
+    "VLM",
     # Localai models and utilities
     # "Model",
     # "Thread",
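After this change `VLM` is re-exported at the package root alongside `LLM`, the new `GoogleS` helpers come in via star import, and `__all__` regains `DWEBS` while gaining `YTdownloader`, `WEBSX`, and `VLM`. A hypothetical smoke test of the exported names (assumes webscout 4.4 and its import-time dependencies are installed):

```python
# Hypothetical check of the re-exported top-level names in webscout 4.4.
import webscout

for name in ("DWEBS", "YTdownloader", "WEBSX", "VLM", "LLM"):
    print(name, name in webscout.__all__)
```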
webscout/async_providers.py
CHANGED
@@ -2,7 +2,6 @@ from webscout import AsyncPhindSearch
 from webscout import AsyncYEPCHAT
 from webscout import AsyncOPENGPT
 from webscout import AsyncOPENAI
-from webscout import AsyncLLAMA2
 from webscout import AsyncLEO
 from webscout import AsyncKOBOLDAI
 from webscout import AsyncGROQ
@@ -15,7 +14,6 @@ mapper: dict[str, object] = {
     "koboldai": AsyncKOBOLDAI,
     "blackboxai": AsyncBLACKBOXAI,
     "gpt4free": AsyncGPT4FREE,
-    "llama2": AsyncLLAMA2,
     "yepchat": AsyncYEPCHAT,
     "leo": AsyncLEO,
     "groq": AsyncGROQ,
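With `AsyncLLAMA2` removed, the `"llama2"` key disappears from the async provider mapper, so name-based lookups must use one of the remaining keys. A sketch (assumes webscout 4.4; `"koboldai"` is one of the keys visible above):

```python
# Sketch of resolving an async provider by name after this change (webscout 4.4).
from webscout.async_providers import mapper

print("llama2" in mapper)          # False -- the AsyncLLAMA2 entry was removed
provider_cls = mapper["koboldai"]  # still resolves to AsyncKOBOLDAI
print(provider_cls.__name__)
```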
webscout/cli.py
CHANGED
@@ -20,8 +20,10 @@ from rich.table import Table
 from rich.style import Style
 from rich.text import Text
 from rich.align import Align
-from rich.progress import track
+from rich.progress import track, Progress
 from rich.prompt import Prompt, Confirm
+from rich.columns import Columns
+from pyfiglet import figlet_format
 
 logger = logging.getLogger(__name__)
 
@@ -45,13 +47,12 @@ COLORS = {
 }
 
 def _print_data(data):
-    """Prints data using rich panels and markdown
+    """Prints data using rich panels and markdown."""
     console = Console()
     if data:
         for i, e in enumerate(data, start=1):
-
-            table =
-            table.add_column("Key", style="cyan", no_wrap=True)
+            table = Table(show_header=False, show_lines=True, expand=True, box=None)  # Removed duplicate title
+            table.add_column("Key", style="cyan", no_wrap=True, width=15)
             table.add_column("Value", style="white")
 
             for j, (k, v) in enumerate(e.items(), start=1):
@@ -59,21 +60,22 @@
                 width = 300 if k in ("content", "href", "image", "source", "thumbnail", "url") else 78
                 k = "language" if k == "detected_language" else k
                 text = click.wrap_text(
-                    f"{v}", width=width, initial_indent="", subsequent_indent=" " *
-                )
+                    f"{v}", width=width, initial_indent="", subsequent_indent=" " * 18, preserve_paragraphs=True
+                ).replace("\n", "\n\n")
                 else:
                     text = v
                 table.add_row(k, text)
 
-            #
-            console.print(Panel(
-            console.print("\n")
+            # Only the Panel has the title now
+            console.print(Panel(table, title=f"Result {i}", expand=False, style="green on black"))
+            console.print("\n")
+
 
 def _sanitize_keywords(keywords):
     """Sanitizes keywords for file names and paths. Removes invalid characters like ':'. """
     keywords = (
         keywords.replace("filetype", "")
-        .replace(":", "")
+        .replace(":", "")
         .replace('"', "'")
         .replace("site", "")
         .replace(" ", "_")
@@ -86,8 +88,8 @@ def _sanitize_keywords(keywords):
 @click.group(chain=True)
 def cli():
     """webscout CLI tool - Search the web with a rich UI."""
-
-
+    console = Console()
+    console.print(f"[bold blue]{figlet_format('Webscout')}[/]\n", justify="center")
 
 def safe_entry_point():
     try:
@@ -100,7 +102,7 @@
 def version():
     """Shows the current version of webscout."""
     console = Console()
-    console.print(Panel(Text(f"webscout v{__version__}", style="cyan"), title="Version"))
+    console.print(Panel(Text(f"webscout v{__version__}", style="cyan"), title="Version", expand=False))
 
 
 @cli.command()
@@ -111,16 +113,15 @@ def chat(proxy):
     client = WEBS(proxy=proxy)
 
     console = Console()
-    console.print(Panel(Text("Available AI Models:", style="cyan"), title="DuckDuckGo AI Chat"))
-
-
-    chosen_model_idx = Prompt.ask("Choose a model by entering its number [1]", choices=[str(i) for i in range(1, len(models) + 1)], default="1")
+    console.print(Panel(Text("Available AI Models:", style="cyan"), title="DuckDuckGo AI Chat", expand=False))
+    console.print(Columns([Panel(Text(model, justify="center"), expand=True) for model in models]))
+    chosen_model_idx = Prompt.ask("[bold cyan]Choose a model by entering its number[/] [1]", choices=[str(i) for i in range(1, len(models) + 1)], default="1")
     chosen_model_idx = int(chosen_model_idx) - 1
     model = models[chosen_model_idx]
-    console.print(f"Using model: {model}")
+    console.print(f"[bold green]Using model:[/] {model}")
 
     while True:
-        user_input =
+        user_input = Prompt.ask(f"{'-'*78}\n[bold blue]You:[/]")
         if not user_input.strip():
             break
 
@@ -129,7 +130,7 @@
         console.print(Panel(Text(f"AI: {text}", style="green"), title="AI Response"))
 
         if "exit" in user_input.lower() or "quit" in user_input.lower():
-            console.print(Panel(Text("Exiting chat session.", style="cyan"), title="Goodbye"))
+            console.print(Panel(Text("Exiting chat session.", style="cyan"), title="Goodbye", expand=False))
             break
 
 
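The CLI changes all follow one pattern: a pyfiglet banner rendered through rich, plus header-less `Table`s wrapped in titled `Panel`s. A standalone sketch of that pattern (assumes only that the `rich` and `pyfiglet` packages are installed):

```python
# Standalone sketch of the banner-plus-panel pattern the CLI now uses.
# Assumes the rich and pyfiglet packages are installed.
from pyfiglet import figlet_format
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

console = Console()
console.print(f"[bold blue]{figlet_format('Webscout')}[/]\n", justify="center")

# A header-less two-column table wrapped in a titled Panel, as in _print_data().
table = Table(show_header=False, show_lines=True, expand=True, box=None)
table.add_column("Key", style="cyan", no_wrap=True, width=15)
table.add_column("Value", style="white")
table.add_row("title", "Example result")
table.add_row("href", "https://example.com")
console.print(Panel(table, title="Result 1", expand=False, style="green on black"))
```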
webscout/utils.py
CHANGED
@@ -4,23 +4,33 @@ from html import unescape
 from math import atan2, cos, radians, sin, sqrt
 from typing import Any, Dict, List, Union
 from urllib.parse import unquote
-import orjson
 
 from .exceptions import WebscoutE
 
+try:
+    HAS_ORJSON = True
+    import orjson
+except ImportError:
+    HAS_ORJSON = False
+    import json
+
 REGEX_STRIP_TAGS = re.compile("<.*?>")
 
 
 def json_dumps(obj: Any) -> str:
     try:
-        return
+        return (
+            orjson.dumps(obj, option=orjson.OPT_INDENT_2).decode()
+            if HAS_ORJSON
+            else json.dumps(obj, ensure_ascii=False, indent=2)
+        )
     except Exception as ex:
         raise WebscoutE(f"{type(ex).__name__}: {ex}") from ex
 
 
 def json_loads(obj: Union[str, bytes]) -> Any:
     try:
-        return orjson.loads(obj)
+        return orjson.loads(obj) if HAS_ORJSON else json.loads(obj)
     except Exception as ex:
         raise WebscoutE(f"{type(ex).__name__}: {ex}") from ex
 
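utils.py now degrades gracefully when orjson is missing: a `HAS_ORJSON` flag selects between orjson and the standard-library `json` module, keeping the 2-space-indented output shape the same either way. A quick hypothetical round-trip check (assumes webscout 4.4; works with or without orjson installed):

```python
# Hypothetical round-trip check of the new orjson/json fallback (webscout 4.4).
from webscout.utils import json_dumps, json_loads

payload = {"query": "python", "results": [1, 2, 3]}

text = json_dumps(payload)          # orjson with OPT_INDENT_2, or json with indent=2
assert json_loads(text) == payload  # both backends round-trip identically
print(text)
```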
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "4.
+__version__ = "4.3"
 __prog__ = "webscout"
webscout/webai.py
CHANGED
@@ -37,11 +37,13 @@ from dotenv import load_dotenv
 import g4f
 import webscout
 import webscout.AIutel
+from pyfiglet import figlet_format
 
 init_colorama(autoreset=True)
 
 load_dotenv()  # loads .env variables
 
+console = Console()
 logging.basicConfig(
     format="%(asctime)s - %(levelname)s : %(message)s ",
     datefmt="%H:%M:%S",
@@ -2606,6 +2608,7 @@ def make_commands():
 # @this.handle_exception
 def main(*args):
     """Fireup console programmically"""
+    console.print(f"[bold green]{figlet_format('WebAI')}[/]\n", justify="center")
     sys.argv += list(args)
     args = sys.argv
     if len(args) == 1: