webscout-5.8-py3-none-any.whl → webscout-6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/Amigo.py +267 -0
- webscout/Provider/ChatHub.py +209 -0
- webscout/Provider/Chatify.py +3 -3
- webscout/Provider/Cloudflare.py +3 -3
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/Deepinfra.py +95 -389
- webscout/Provider/Deepseek.py +4 -6
- webscout/Provider/DiscordRocks.py +3 -3
- webscout/Provider/Free2GPT.py +3 -3
- webscout/Provider/OLLAMA.py +4 -4
- webscout/Provider/RUBIKSAI.py +3 -3
- webscout/Provider/TTI/WebSimAI.py +142 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/amigo.py +148 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/parler.py +108 -0
- webscout/Provider/Youchat.py +4 -5
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/ai4chat.py +3 -2
- webscout/Provider/bagoodex.py +145 -0
- webscout/Provider/bixin.py +3 -3
- webscout/Provider/cleeai.py +3 -3
- webscout/Provider/elmo.py +2 -5
- webscout/Provider/julius.py +6 -40
- webscout/Provider/learnfastai.py +253 -0
- webscout/Provider/llamatutor.py +2 -2
- webscout/Provider/prefind.py +232 -0
- webscout/Provider/promptrefine.py +3 -3
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/twitterclone.py +25 -41
- webscout/Provider/upstage.py +3 -3
- webscout/Provider/x0gpt.py +6 -6
- webscout/version.py +1 -1
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/METADATA +187 -121
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/RECORD +39 -32
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/WHEEL +1 -1
- webscout/Provider/Poe.py +0 -208
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/LICENSE.md +0 -0
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/entry_points.txt +0 -0
- {webscout-5.8.dist-info → webscout-6.0.dist-info}/top_level.txt +0 -0
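The new provider modules below all implement webscout's common `Provider` interface (`ask`, `chat`, `get_message`). As a minimal usage sketch — the import path comes straight from the diff (`webscout/Provider/Amigo.py` defining `AmigoChat`), while the assumption that the class is also re-exported at package level rests only on the `__init__.py` change:

```python
# Minimal sketch; import path taken from webscout/Provider/Amigo.py below.
from webscout.Provider.Amigo import AmigoChat

ai = AmigoChat(model="o1-preview")

# Non-streaming: chat() returns the complete response string.
print(ai.chat("Hello!"))

# Streaming: chat(..., stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Tell me a joke", stream=True):
    print(chunk, end="", flush=True)
```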
webscout/Provider/Amigo.py
ADDED
@@ -0,0 +1,267 @@
+import requests
+import json
+import uuid
+import os
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class AmigoChat(Provider):
+    """
+    A class to interact with the AmigoChat.io API.
+    """
+
+    AVAILABLE_MODELS = [
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",  # Llama 3.1
+        "o1-mini",                   # OpenAI o1-mini
+        "claude-3-sonnet-20240229",  # Claude 3 Sonnet
+        "gemini-1.5-pro",            # Gemini Pro
+        "gemini-1-5-flash",          # Gemini Flash
+        "o1-preview",                # OpenAI o1-preview
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "o1-preview",  # Default model
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
+    ):
+        """
+        Initializes the AmigoChat.io API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): HTTP request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): HTTP request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): The AI model to use for text generation. Defaults to "o1-preview".
+                Must be one of AVAILABLE_MODELS.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Authorization": "Bearer ",  # empty
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://amigochat.io",
+            "Priority": "u=1, i",
+            "Referer": "https://amigochat.io/",
+            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                          "AppleWebKit/537.36 (KHTML, like Gecko) "
+                          "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
+            "X-Device-Language": "en-US",
+            "X-Device-Platform": "web",
+            "X-Device-UUID": str(uuid.uuid4()),
+            "X-Device-Version": "1.0.22"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = system_prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        # Define the payload
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "frequency_penalty": 0,
+            "max_tokens": 4000,
+            "presence_penalty": 0,
+            "stream": stream,  # Enable streaming
+            "temperature": 0.5,
+            "top_p": 0.95
+        }
+
+        def for_stream():
+            try:
+                # Make the POST request with streaming enabled
+                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True) as response:
+                    # Check if the request was successful
+                    if response.status_code == 201:
+                        # Iterate over the streamed response line by line
+                        for line in response.iter_lines():
+                            if line:
+                                # Decode the line from bytes to string
+                                decoded_line = line.decode('utf-8').strip()
+                                if decoded_line.startswith("data: "):
+                                    data_str = decoded_line[6:]
+                                    if data_str == "[DONE]":
+                                        break
+                                    try:
+                                        # Load the JSON data
+                                        data_json = json.loads(data_str)
+
+                                        # Extract the content from the response
+                                        choices = data_json.get("choices", [])
+                                        if choices:
+                                            delta = choices[0].get("delta", {})
+                                            content = delta.get("content", "")
+                                            if content:
+                                                yield content if raw else dict(text=content)
+                                    except json.JSONDecodeError:
+                                        print(f"Received non-JSON data: {data_str}")
+                    else:
+                        print(f"Request failed with status code {response.status_code}")
+                        print("Response:", response.text)
+
+            except requests.exceptions.RequestException as e:
+                print("An error occurred while making the request:", e)
+
+        def for_non_stream():
+            # Accumulate the streaming response
+            full_response = ""
+            for chunk in for_stream():
+                if not raw:  # If not raw, chunk is a dictionary
+                    full_response += chunk["text"]
+
+            # Update self.last_response with the full text
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/ChatHub.py
ADDED
@@ -0,0 +1,209 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class ChatHub(Provider):
+    """
+    A class to interact with the ChatHub API.
+    """
+
+    AVAILABLE_MODELS = [
+        'meta/llama3.1-8b',
+        'mistral/mixtral-8x7b',
+        'google/gemma-2',
+        'perplexity/sonar-online',
+    ]
+    default_model = 'perplexity/sonar-online'  # fallback used by get_model()
+    model_aliases = {  # Aliases for shorter model names
+        "llama3.1-8b": 'meta/llama3.1-8b',
+        "mixtral-8x7b": 'mistral/mixtral-8x7b',
+        "gemma-2": 'google/gemma-2',
+        "sonar-online": 'perplexity/sonar-online',
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "sonar-online",
+    ):
+        """Initializes the ChatHub API client."""
+        self.url = "https://app.chathub.gg"
+        self.api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Content-Type': 'application/json',
+            'Origin': self.url,
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+            'X-App-Id': 'web'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Resolve the model
+        self.model = self.get_model(model)
+
+    def get_model(self, model: str) -> str:
+        """
+        Resolves the model name using aliases or defaults.
+        """
+        if model in self.AVAILABLE_MODELS:
+            return model
+        elif model in self.model_aliases:
+            return self.model_aliases[model]
+        else:
+            print(f"Model '{model}' not found. Using default model '{self.default_model}'.")
+            return self.default_model  # Use class-level default
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        data = {
+            "model": self.model,
+            "messages": [{"role": "user", "content": conversation_prompt}],
+            "tools": []
+        }
+
+        # Set the Referer header dynamically based on the resolved model
+        self.headers['Referer'] = f"{self.url}/chat/{self.model}"
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            decoded_line = line.strip()
+                            if decoded_line.startswith('data:'):
+                                data_str = decoded_line[5:].strip()
+                                if data_str == '[DONE]':
+                                    break
+                                try:
+                                    data_json = json.loads(data_str)
+                                    text_delta = data_json.get('textDelta')
+                                    if text_delta:
+                                        streaming_text += text_delta
+                                        yield text_delta if raw else dict(text=text_delta)
+                                except json.JSONDecodeError:
+                                    continue
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request error: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Generate response `str`"""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,  # Pass stream=False
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    from rich import print
+    bot = ChatHub()
+    try:
+        response = bot.chat("who is Abhay koul in AI", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
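`ChatHub.get_model` accepts either a full model id or one of the short aliases; an illustrative resolution check based on the logic above (`default_model` is an attribute supplied here as a fix, since `get_model` falls back to it):

```python
# Alias resolution as implemented in ChatHub.get_model.
bot = ChatHub(model="gemma-2")
print(bot.model)  # -> google/gemma-2 (alias expanded)

bot = ChatHub(model="perplexity/sonar-online")
print(bot.model)  # -> perplexity/sonar-online (full ids pass through unchanged)
```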
webscout/Provider/Chatify.py
CHANGED
@@ -115,7 +115,7 @@ class Chatify(Provider):
                 if len(parts) > 1:
                     content = parts[1].strip().strip('"')
                     streaming_text += content
-                    yield content if raw else dict(text=
+                    yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -169,7 +169,7 @@ class Chatify(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Chatify()
-    response = ai.chat("
+    ai = Chatify(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/Cloudflare.py
CHANGED
@@ -194,7 +194,7 @@ class Cloudflare(Provider):
                     data = json.loads(line[6:])
                     content = data.get('response', '')
                     streaming_response += content
-                    yield content if raw else dict(text=
+                    yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -255,7 +255,7 @@ class Cloudflare(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = Cloudflare()
-    response = ai.chat("
+    ai = Cloudflare(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/DARKAI.py
CHANGED
@@ -156,7 +156,7 @@ class DARKAI(Provider):
                     if event.get("event") == "final-response":
                         message = event['data'].get('message', '')
                         streaming_response += message
-                        yield message if raw else dict(text=
+                        yield message if raw else dict(text=message)
                 except json.decoder.JSONDecodeError:
                     continue
             self.last_response.update(dict(text=streaming_response))
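The Chatify, Cloudflare, and DARKAI hunks are all the same one-line fix: when `raw` is false, each streamed chunk is now yielded as `dict(text=<chunk>)`, the shape `get_message` expects. A short sketch of the resulting contract from the caller's side (shown with Cloudflare, but any of the three behaves alike):

```python
# Sketch of the raw/dict streaming contract these fixes restore.
ai = Cloudflare(timeout=5000)

# Default (raw=False): ask() yields dicts shaped like {"text": "..."}.
for chunk in ai.ask("write a poem about AI", stream=True):
    print(chunk["text"], end="", flush=True)

# raw=True: ask() yields the bare text fragments as received.
for chunk in ai.ask("write a poem about AI", stream=True, raw=True):
    print(chunk, end="", flush=True)
```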