webscout 8.3.5-py3-none-any.whl → 8.3.7-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registry.
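Since a wheel is an ordinary zip archive, a diff like this can be reproduced locally with the Python standard library alone. A minimal sketch, assuming both wheels have already been downloaded into the working directory (e.g. with `pip download webscout==8.3.5 --no-deps` and the same for 8.3.7):

```python
import difflib
import zipfile

# Published wheel names; assumed to be present in the working directory.
OLD = "webscout-8.3.5-py3-none-any.whl"
NEW = "webscout-8.3.7-py3-none-any.whl"

def read_member(wheel_path, member):
    """Read one file out of a wheel; returns [] if the file is absent in that version."""
    with zipfile.ZipFile(wheel_path) as zf:
        if member not in zf.namelist():
            return []
        return zf.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

# Example: the one-line version bump recorded below in webscout/version.py.
member = "webscout/version.py"
diff = difflib.unified_diff(
    read_member(OLD, member),
    read_member(NEW, member),
    fromfile=f"8.3.5/{member}",
    tofile=f"8.3.7/{member}",
)
print("".join(diff))
```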
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
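The added and removed files in the listing above can likewise be recomputed by comparing the member lists of the two archives. A minimal sketch, again assuming the two wheels are local downloads:

```python
import zipfile

def members(wheel_path):
    """Return the set of file paths inside a wheel (a plain zip archive)."""
    with zipfile.ZipFile(wheel_path) as zf:
        return set(zf.namelist())

old = members("webscout-8.3.5-py3-none-any.whl")  # assumed local download
new = members("webscout-8.3.7-py3-none-any.whl")  # assumed local download

for name in sorted(new - old):
    print(f"added:   {name}")
for name in sorted(old - new):
    print(f"removed: {name}")
```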
webscout/Provider/OPENAI/multichat.py
DELETED

@@ -1,378 +0,0 @@

import time
import uuid
from datetime import datetime
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage,
    format_prompt, count_tokens
)

# Import curl_cffi for Cloudflare bypass
from curl_cffi.requests import Session
from curl_cffi import CurlError

# Import LitAgent for user agent generation
from webscout.litagent import LitAgent

# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"

# Model configurations
MODEL_CONFIGS = {
    "llama": {
        "endpoint": "https://www.multichatai.com/api/chat/meta",
        "models": {
            "llama-3.3-70b-versatile": {"contextLength": 131072},
            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
        },
    },
    "cohere": {
        "endpoint": "https://www.multichatai.com/api/chat/cohere",
        "models": {
            "command-r": {"contextLength": 128000},
            "command": {"contextLength": 4096},
        },
    },
    "google": {
        "endpoint": "https://www.multichatai.com/api/chat/google",
        "models": {
            "gemini-1.5-flash-002": {"contextLength": 1048576},
            "gemma2-9b-it": {"contextLength": 8192},
            "gemini-2.0-flash": {"contextLength": 128000},
        },
        "message_format": "parts",
    },
    "deepinfra": {
        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
        "models": {
            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
            "gemma-2-27b-it": {"contextLength": 8192},
        },
    },
    "mistral": {
        "endpoint": "https://www.multichatai.com/api/chat/mistral",
        "models": {
            "mistral-small-latest": {"contextLength": 32000},
            "codestral-latest": {"contextLength": 32000},
            "open-mistral-7b": {"contextLength": 8000},
            "open-mixtral-8x7b": {"contextLength": 8000},
        },
    },
    "alibaba": {
        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
        "models": {
            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
        },
    },
}

class Completions(BaseCompletions):
    def __init__(self, client: 'MultiChatAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion using the MultiChatAI API.

        Args:
            model: The model to use
            messages: A list of messages in the conversation
            max_tokens: Maximum number of tokens to generate
            stream: Whether to stream the response
            temperature: Temperature for response generation
            top_p: Top-p sampling parameter

        Returns:
            Either a ChatCompletion object or a generator of ChatCompletionChunk objects
        """
        try:
            # Set client parameters based on function arguments
            self._client.model = model
            if temperature is not None:
                self._client.temperature = temperature
            if max_tokens is not None:
                self._client.max_tokens_to_sample = max_tokens

            # Extract system messages and set as system prompt
            for message in messages:
                if message.get("role") == "system":
                    self._client.system_prompt = message.get("content", "")
                    break

            # Format all messages into a single prompt
            user_message = format_prompt(messages)

            # Generate a unique request ID
            request_id = f"multichat-{str(uuid.uuid4())}"
            created_time = int(time.time())

            # Make the API request
            response_text = self._client._make_api_request(user_message, timeout=timeout, proxies=proxies)

            # If streaming is requested, simulate streaming with the full response
            if stream:
                def generate_chunks():
                    # Create a single chunk with the full response
                    delta = ChoiceDelta(content=response_text)
                    choice = Choice(index=0, delta=delta, finish_reason="stop")
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                    )
                    yield chunk

                return generate_chunks()

            # For non-streaming, create a complete response
            message = ChatCompletionMessage(role="assistant", content=response_text)
            choice = Choice(index=0, message=message, finish_reason="stop")

            # Estimate token usage using count_tokens
            prompt_tokens = count_tokens(user_message)
            completion_tokens = count_tokens(response_text)
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during MultiChatAI request: {e}{RESET}")
            raise IOError(f"MultiChatAI request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'MultiChatAI'):
        self.completions = Completions(client)

class MultiChatAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for MultiChatAI API.

    Usage:
        client = MultiChatAI()
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        # Llama Models
        "llama-3.3-70b-versatile",
        "llama-3.2-11b-vision-preview",
        "deepseek-r1-distill-llama-70b",

        # Google Models
        "gemma2-9b-it",
        "gemini-2.0-flash",

        # DeepInfra Models
        "Sao10K/L3.1-70B-Euryale-v2.2",
        "Gryphe/MythoMax-L2-13b",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "meta-llama/Meta-Llama-3.1-405B-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-405B",

        # Alibaba Models
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/QwQ-32B-Preview"
    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: dict = {},
        model: str = "llama-3.3-70b-versatile",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        max_tokens: int = 4000
    ):
        """
        Initialize the MultiChatAI client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration
            model: Default model to use
            system_prompt: System prompt to use
            temperature: Temperature for response generation
            max_tokens: Maximum number of tokens to generate
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.timeout = timeout
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens_to_sample = max_tokens

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://www.multichatai.com",
            "referer": "https://www.multichatai.com/",
            "user-agent": self.agent.random(),
        }

        # Update curl_cffi session headers, proxies, and cookies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies
        self.session.cookies.set("session", uuid.uuid4().hex)

        # Initialize the provider based on the model
        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

        # Initialize the chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

    def _get_endpoint(self) -> str:
        """Get the API endpoint for the current provider."""
        return MODEL_CONFIGS[self.provider]["endpoint"]

    def _get_chat_settings(self) -> Dict[str, Any]:
        """Get chat settings for the current model."""
        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
        return {
            "model": self.model,
            "prompt": self.system_prompt,
            "temperature": self.temperature,
            "contextLength": base_settings["contextLength"],
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        }

    def _get_system_message(self) -> str:
        """Generate system message with current date."""
        current_date = datetime.now().strftime("%d/%m/%Y")
        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"

    def _build_messages(self, conversation_prompt: str) -> list:
        """Build messages array based on provider type."""
        if self.provider == "google":
            return [
                {"role": "user", "parts": self._get_system_message()},
                {"role": "model", "parts": "I will follow your instructions."},
                {"role": "user", "parts": conversation_prompt}
            ]
        else:
            return [
                {"role": "system", "content": self._get_system_message()},
                {"role": "user", "content": conversation_prompt}
            ]

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider based on the model name."""
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider

        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"].keys():
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        raise ValueError(error_msg)

    def _make_api_request(
        self,
        prompt: str,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
    ) -> str:
        """Make the API request with proper error handling.

        Accepts the per-call timeout and proxies that create() passes through.
        """
        try:
            if proxies is not None:
                self.session.proxies = proxies
            payload = {
                "chatSettings": self._get_chat_settings(),
                "messages": self._build_messages(prompt),
                "customModelId": "",
            }

            # Use curl_cffi session post with impersonate; per-call timeout overrides the default
            response = self.session.post(
                self._get_endpoint(),
                json=payload,
                timeout=timeout if timeout is not None else self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            # Return the response text
            return response.text.strip()

        except CurlError as e:
            raise IOError(f"API request failed (CurlError): {e}") from e
        except Exception as e:
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise IOError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e

if __name__ == "__main__":
    print(f"{BOLD}Testing MultiChatAI OpenAI-compatible provider{RESET}")

    client = MultiChatAI()
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say 'Hello' in one word"}
        ]
    )

    print(f"Response: {response.choices[0].message.content}")
webscout/Provider/Reka.py
DELETED

@@ -1,214 +0,0 @@

import requests
import json

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions

#-----------------------------------------------REKA-----------------------------------------------
class REKA(Provider):
    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "reka-core",
        system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
        use_search_engine: bool = False,
        use_code_interpreter: bool = False,
    ):
        """Instantiates REKA

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            model (str, optional): REKA model name. Defaults to "reka-core".
            system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
            use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
            use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
        """
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.reka.ai/api/chat"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.use_search_engine = use_search_engine
        self.use_code_interpreter = use_code_interpreter
        self.access_token = api_key
        self.headers = {
            "Authorization": f"Bearer {self.access_token}",
        }

        # Materialize as a tuple so membership checks and error messages stay valid
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self.session.headers.update(self.headers)
        payload = {
            "conversation_history": [
                {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
            ],
            "stream": stream,
            "use_search_engine": self.use_search_engine,
            "use_code_interpreter": self.use_code_interpreter,
            "model_name": self.model,
            # "model_name": "reka-flash",
            # "model_name": "reka-edge",
        }

        def for_stream():
            response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
            if not response.ok:
                raise Exception(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            for value in response.iter_lines(
                decode_unicode=True,
                chunk_size=self.stream_chunk_size,
            ):
                try:
                    resp = json.loads(value)
                    self.last_response.update(resp)
                    yield value if raw else resp
                except json.decoder.JSONDecodeError:
                    pass
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            # let's make use of stream
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text")

if __name__ == "__main__":
    from rich import print
    ai = REKA(api_key="YOUR_API_KEY", timeout=5000)
    response = ai.chat("write a poem about AI", stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
webscout/Provider/TTS/sthir.py
DELETED

@@ -1,94 +0,0 @@

import requests
import pathlib
import tempfile
from webscout import exceptions
from webscout.litagent import LitAgent
from webscout.Provider.TTS.base import BaseTTSProvider

class SthirTTS(BaseTTSProvider):
    """
    Text-to-speech provider using the Sthir.org TTS API.
    """
    headers = {
        "Content-Type": "application/json",
        "User-Agent": LitAgent().random(),
    }

    all_voices = {
        "aura-luna-en": "Sophie (American, Feminine)",
        "aura-stella-en": "Isabella (American, Feminine)",
        "aura-athena-en": "Emma (British, Feminine)",
        "aura-hera-en": "Victoria (American, Feminine)",
        "aura-asteria-en": "Maria (American, Feminine)",
        "aura-arcas-en": "Alex (American, Masculine)",
        "aura-zeus-en": "Thomas (American, Masculine)",
        "aura-perseus-en": "Michael (American, Masculine)",
        "aura-angus-en": "Connor (Irish, Masculine)",
        "aura-orpheus-en": "James (American, Masculine)",
        "aura-helios-en": "William (British, Masculine)",
        "aura-orion-en": "Daniel (American, Masculine)",
    }

    def __init__(self, timeout: int = 20, proxies: dict = None):
        """Initializes the SthirTTS client."""
        super().__init__()
        self.api_url = "https://sthir.org/com.api/tts-api.php"
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies.update(proxies)
        self.timeout = timeout

    def tts(self, text: str, voice: str = "aura-luna-en") -> str:
        """
        Converts text to speech using the Sthir.org API and saves it to a file.

        Args:
            text (str): The text to convert to speech
            voice (str): The voice to use for TTS (default: "aura-luna-en")

        Returns:
            str: Path to the generated audio file

        Raises:
            exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
        """
        assert (
            voice in self.all_voices
        ), f"Voice '{voice}' not one of [{', '.join(self.all_voices.keys())}]"

        filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=self.temp_dir))
        payload = {"text": text, "voice": voice}

        try:
            response = self.session.post(
                self.api_url,
                headers=self.headers,
                json=payload,
                timeout=self.timeout
            )
            if response.status_code == 200 and len(response.content) > 0:
                with open(filename, "wb") as f:
                    f.write(response.content)
                return filename.as_posix()
            else:
                # Surface a structured API error message when the body is JSON
                try:
                    error_data = response.json()
                except Exception:
                    error_data = {}
                if "error" in error_data:
                    raise exceptions.FailedToGenerateResponseError(f"API error: {error_data['error']}")
                raise exceptions.FailedToGenerateResponseError(f"Sthir API error: {response.text}")
        except exceptions.FailedToGenerateResponseError:
            raise
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Failed to perform the operation: {e}")

# Example usage
if __name__ == "__main__":
    sthir = SthirTTS()
    text = "This is a test of the Sthir.org text-to-speech API. It supports multiple voices."
    audio_file = sthir.tts(text, voice="aura-luna-en")
    print(f"Audio saved to: {audio_file}")