webscout-5.2-py3-none-any.whl → webscout-5.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIauto.py +8 -12
- webscout/AIutel.py +10 -10
- webscout/Agents/Onlinesearcher.py +5 -5
- webscout/Agents/functioncall.py +123 -97
- webscout/DWEBS.py +99 -77
- webscout/Local/_version.py +2 -2
- webscout/Provider/Andi.py +1 -21
- webscout/Provider/BasedGPT.py +1 -21
- webscout/Provider/Blackboxai.py +1 -21
- webscout/Provider/Chatify.py +175 -0
- webscout/Provider/Cloudflare.py +1 -22
- webscout/Provider/Cohere.py +2 -23
- webscout/Provider/DARKAI.py +0 -1
- webscout/Provider/Deepinfra.py +2 -16
- webscout/Provider/EDITEE.py +3 -26
- webscout/Provider/Gemini.py +1 -24
- webscout/Provider/Groq.py +0 -2
- webscout/Provider/Koboldai.py +0 -21
- webscout/Provider/Llama.py +4 -21
- webscout/Provider/NetFly.py +21 -61
- webscout/Provider/OLLAMA.py +0 -17
- webscout/Provider/Openai.py +2 -22
- webscout/Provider/Perplexity.py +1 -2
- webscout/Provider/Phind.py +3 -508
- webscout/Provider/RUBIKSAI.py +11 -5
- webscout/Provider/Reka.py +4 -21
- webscout/Provider/TTS/streamElements.py +1 -22
- webscout/Provider/TTS/voicepod.py +11 -8
- webscout/Provider/ThinkAnyAI.py +17 -78
- webscout/Provider/Youchat.py +3 -20
- webscout/Provider/__init__.py +17 -8
- webscout/Provider/ai4chat.py +14 -8
- webscout/Provider/cerebras.py +199 -0
- webscout/Provider/{Berlin4h.py → cleeai.py} +68 -73
- webscout/Provider/{liaobots.py → elmo.py} +75 -106
- webscout/Provider/felo_search.py +29 -87
- webscout/Provider/geminiapi.py +198 -0
- webscout/Provider/genspark.py +222 -0
- webscout/Provider/julius.py +3 -20
- webscout/Provider/koala.py +1 -1
- webscout/Provider/lepton.py +194 -0
- webscout/Provider/turboseek.py +4 -21
- webscout/Provider/x0gpt.py +182 -0
- webscout/Provider/xdash.py +2 -22
- webscout/Provider/yep.py +391 -149
- webscout/YTdownloader.py +2 -3
- webscout/__init__.py +2 -2
- webscout/exceptions.py +2 -1
- webscout/transcriber.py +195 -140
- webscout/version.py +1 -1
- {webscout-5.2.dist-info → webscout-5.4.dist-info}/METADATA +47 -134
- webscout-5.4.dist-info/RECORD +98 -0
- webscout/voice.py +0 -34
- webscout-5.2.dist-info/RECORD +0 -93
- {webscout-5.2.dist-info → webscout-5.4.dist-info}/LICENSE.md +0 -0
- {webscout-5.2.dist-info → webscout-5.4.dist-info}/WHEEL +0 -0
- {webscout-5.2.dist-info → webscout-5.4.dist-info}/entry_points.txt +0 -0
- {webscout-5.2.dist-info → webscout-5.4.dist-info}/top_level.txt +0 -0
webscout/DWEBS.py
CHANGED
@@ -8,18 +8,19 @@ import time
 import random
 
 class GoogleS:
-    """
-
-
+    """
+    Class to perform Google searches and retrieve results.
+    """
 
     def __init__(
         self,
         headers: Optional[Dict[str, str]] = None,
         proxy: Optional[str] = None,
         timeout: Optional[int] = 10,
-
-
-
+        max_workers: int = 20  # Increased max workers for thread pool
+    ):
+        """Initializes the GoogleS object."""
+        self.proxy = proxy
         self.headers = headers if headers else {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"
         }
@@ -28,20 +29,19 @@ class GoogleS:
         self.client.headers.update(self.headers)
         self.client.proxies.update({"http": self.proxy, "https": self.proxy})
         self.timeout = timeout
+        self._executor = ThreadPoolExecutor(max_workers=max_workers)
 
-    def __enter__(self)
+    def __enter__(self):
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.client.close()
 
-    def _get_url(
-
-
-
-
-        data: Optional[Union[Dict[str, str], bytes]] = None,
-    ) -> bytes:
+    def _get_url(self, method: str, url: str, params: Optional[Dict[str, str]] = None,
+                 data: Optional[Union[Dict[str, str], bytes]] = None) -> bytes:
+        """
+        Makes an HTTP request and returns the response content.
+        """
         try:
             resp = self.client.request(method, url, params=params, data=data, timeout=self.timeout)
         except Exception as ex:
@@ -50,13 +50,13 @@ class GoogleS:
             return resp.content
         raise Exception(f"{resp.url} returned status code {resp.status_code}. {params=} {data=}")
 
-    def
-        """
-
-
+    def _extract_text_from_webpage(self, html_content: bytes, max_characters: Optional[int] = None) -> str:
+        """
+        Extracts visible text from HTML content using lxml parser.
+        """
+        soup = BeautifulSoup(html_content, 'lxml')  # Use lxml parser
         for tag in soup(["script", "style", "header", "footer", "nav"]):
             tag.extract()
-        # Get the remaining visible text
         visible_text = soup.get_text(strip=True)
         if max_characters:
             visible_text = visible_text[:max_characters]
@@ -64,32 +64,56 @@ class GoogleS:
 
     def search(
         self,
-
+        query: str,
         region: str = "us-en",
-
+        language: str = "en",
         safe: str = "off",
-
-        max_results:
-
-
-    ) -> List[Dict[str, str]]:
-        """
-
+        time_period: Optional[str] = None,
+        max_results: int = 10,
+        extract_text: bool = False,
+        max_text_length: Optional[int] = 100,
+    ) -> List[Dict[str, Union[str, int]]]:
+        """
+        Performs a Google search and returns the results.
+
+        Args:
+            query (str): The search query.
+            region (str, optional): The region to search in (e.g., "us-en"). Defaults to "us-en".
+            language (str, optional): The language of the search results (e.g., "en"). Defaults to "en".
+            safe (str, optional): Safe search setting ("off", "active"). Defaults to "off".
+            time_period (Optional[str], optional): Time period filter (e.g., "h" for past hour, "d" for past day).
+                Defaults to None.
+            max_results (int, optional): The maximum number of results to retrieve. Defaults to 10.
+            extract_text (bool, optional): Whether to extract text from the linked web pages. Defaults to False.
+            max_text_length (Optional[int], optional): The maximum length of the extracted text (in characters).
+                Defaults to 100.
+
+        Returns:
+            List[Dict[str, Union[str, int]]]: A list of dictionaries, each representing a search result, containing:
+                - 'title': The title of the result.
+                - 'href': The URL of the result.
+                - 'abstract': The description snippet of the result.
+                - 'index': The index of the result in the list.
+                - 'type': The type of result (currently always "web").
+                - 'visible_text': The extracted text from the web page (if `extract_text` is True).
+        """
+        assert query, "Query cannot be empty."
 
         results = []
        futures = []
         start = 0
+
         while len(results) < max_results:
             params = {
-                "q":
-                "num": 10,
-                "hl":
+                "q": query,
+                "num": 10,
+                "hl": language,
                 "start": start,
                 "safe": safe,
                 "gl": region,
             }
-            if
-                params["tbs"] = f"qdr:{
+            if time_period:
+                params["tbs"] = f"qdr:{time_period}"
 
             futures.append(self._executor.submit(self._get_url, "GET", "https://www.google.com/search", params=params))
             start += 10
@@ -97,54 +121,52 @@ class GoogleS:
         for future in as_completed(futures):
             try:
                 resp_content = future.result()
-                soup = BeautifulSoup(resp_content,
-
+                soup = BeautifulSoup(resp_content, 'lxml')  # Use lxml parser
+                result_blocks = soup.find_all("div", class_="g")
 
-                if not
+                if not result_blocks:
                     break
 
-                [37 removed lines of the old result-parsing loop, not recoverable from the rendered diff]
-            except Exception as e:
-                print(f"Error extracting result: {e}")
+                # Extract links and titles first
+                for result_block in result_blocks:
+                    link = result_block.find("a", href=True)
+                    title = result_block.find("h3")
+                    description_box = result_block.find(
+                        "div", {"style": "-webkit-line-clamp:2"}
+                    )
+
+                    if link and title and description_box:
+                        url = link["href"]
+                        results.append({
+                            "title": title.text,
+                            "href": url,
+                            "abstract": description_box.text,
+                            "index": len(results),
+                            "type": "web",
+                            "visible_text": ""  # Initialize visible_text as empty string
+                        })
+
+                        if len(results) >= max_results:
+                            break  # Stop if we have enough results
+
+                # Parallelize text extraction if needed
+                if extract_text:
+                    with ThreadPoolExecutor(max_workers=self._executor._max_workers) as text_extractor:
+                        extraction_futures = [
+                            text_extractor.submit(self._extract_text_from_webpage,
+                                                  self._get_url("GET", result['href']),
+                                                  max_characters=max_text_length)
+                            for result in results
+                            if 'href' in result
+                        ]
+                        for i, future in enumerate(as_completed(extraction_futures)):
+                            try:
+                                results[i]['visible_text'] = future.result()
+                            except Exception as e:
+                                print(f"Error extracting text: {e}")
 
             except Exception as e:
-                print(f"Error
+                print(f"Error: {e}")
 
         return results
 
@@ -152,6 +174,6 @@ class GoogleS:
 if __name__ == "__main__":
     from rich import print
     searcher = GoogleS()
-    results = searcher.search("HelpingAI-9B", max_results=20,
+    results = searcher.search("HelpingAI-9B", max_results=20, extract_text=False, max_text_length=200)
     for result in results:
         print(result)
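The reworked search method fans its page requests out over a ThreadPoolExecutor and can optionally pull visible text from each result page. A minimal usage sketch of the 5.4 signature shown above (the query and parameter values are illustrative):

from webscout.DWEBS import GoogleS

# GoogleS is a context manager; __exit__ closes the HTTP client.
with GoogleS() as searcher:
    results = searcher.search(
        "HelpingAI-9B",
        time_period="d",      # becomes the tbs=qdr:d (past day) filter
        max_results=5,
        extract_text=True,    # also fetch each hit and extract its visible text
        max_text_length=200,  # truncate the extracted text to 200 characters
    )
    for result in results:
        print(result["title"], result["href"])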
webscout/Local/_version.py
CHANGED
@@ -1,3 +1,3 @@
 from llama_cpp import __version__ as __llama_cpp_version__
-
-__version__ =
+from webscout.version import __prog__, __version__
+__version__ = __version__
webscout/Provider/Andi.py
CHANGED
@@ -1,32 +1,12 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
-import requests
-from requests import get
 from uuid import uuid4
-
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
+import requests
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
 from webscout import WEBS
 from rich import print
 
webscout/Provider/BasedGPT.py
CHANGED
@@ -1,32 +1,12 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-
-import httpx
+
 
 class BasedGPT(Provider):
     def __init__(
webscout/Provider/Blackboxai.py
CHANGED
@@ -1,32 +1,12 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
 import re
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
 import httpx
 
 #------------------------------------------------------BLACKBOXAI--------------------------------------------------------
@@ -433,7 +413,7 @@ class AsyncBLACKBOXAI(AsyncProvider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
 # Function to clean the response text
 def clean_response(response_text: str) -> str:
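The only functional change in this file normalizes escaped newlines: the API returns text containing literal \n sequences, which get_message now turns into real line breaks (the second replace is a no-op once the first has run, since no literal \n\n can remain). A standalone sketch of the transformation on an illustrative string:

# A response string holding literal backslash-n sequences, as returned by the API.
raw = 'Hello!\\nHow can I help you today?'
print(raw)                       # Hello!\nHow can I help you today?
print(raw.replace('\\n', '\n'))  # prints across two lines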
webscout/Provider/Chatify.py
ADDED
@@ -0,0 +1,175 @@
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+import requests
+
+class Chatify(Provider):
+    """
+    A class to interact with the Chatify AI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+    ):
+        """
+        Initializes the Chatify AI API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chatify-ai.vercel.app/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://chatify-ai.vercel.app',
+            'Referer': 'https://chatify-ai.vercel.app/',
+            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = system_prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """
+        Sends a prompt to the Chatify API and returns the response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = []
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+        messages.append({"role": "user", "content": conversation_prompt})
+
+        payload = {
+            "messages": messages
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')  # Decode the line
+                    parts = decoded_line.split(':', 1)
+                    if len(parts) > 1:
+                        content = parts[1].strip().strip('"')
+                        streaming_text += content
+                        yield content if raw else dict(text=streaming_text)
+            self.last_response.update(dict(text=streaming_text))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the Chatify API.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response and formats it.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+
+# Example usage
+if __name__ == "__main__":
+    from rich import print
+
+    ai = Chatify()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
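Chatify's for_stream assumes each streamed line arrives in a prefix:"chunk" shape and recovers the chunk by splitting on the first colon and stripping the surrounding quotes. A standalone sketch of that parsing step (the sample line is hypothetical):

# A hypothetical streamed line, shaped the way the provider code expects.
line = b'0:"Hello there!"'
decoded_line = line.decode('utf-8')    # '0:"Hello there!"'
parts = decoded_line.split(':', 1)     # ['0', '"Hello there!"']
content = parts[1].strip().strip('"')  # 'Hello there!'
print(content)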
webscout/Provider/Cloudflare.py
CHANGED
@@ -1,32 +1,11 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
-import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import
+from uuid import uuid4
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
 import cloudscraper
 
 class Cloudflare(Provider):
webscout/Provider/Cohere.py
CHANGED
@@ -1,33 +1,12 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
 from webscout import exceptions
-
-
-import httpx
+
+
 #-----------------------------------------------Cohere--------------------------------------------
 class Cohere(Provider):
     def __init__(
webscout/Provider/DARKAI.py
CHANGED
webscout/Provider/Deepinfra.py
CHANGED
@@ -1,26 +1,12 @@
-
-import uuid
-import click
+
 import requests
-
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
-import json
-import yaml
+
 from ..AIutel import Optimizers
 from ..AIutel import Conversation
 from ..AIutel import AwesomePrompts, sanitize_stream
 from ..AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator
-import logging
 import httpx
 
 class DeepInfra(Provider):
|