webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/monochat.py
DELETED
@@ -1,275 +0,0 @@
-from typing import Generator, Optional, Union, Any, Dict
-from uuid import uuid4
-from curl_cffi import CurlError
-from curl_cffi.requests import Session
-import re
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class MonoChat(Provider):
-    """
-    MonoChat provider for interacting with the gg.is-a-furry.dev API (OpenAI-compatible).
-    """
-    AVAILABLE_MODELS = [
-        "deepseek-r1",
-        "deepseek-v3",
-        "uncensored-r1-32b",
-        "o3-pro",
-        "o4-mini",
-        "o3",
-        "gpt-4.5-preview",
-        "gpt-4.1",
-        "gpt-4.1-mini",
-        "gpt-4.1-nano",
-        "gpt-4o",
-        "gpt-4o-mini",
-        "gpt-4o-search-preview",
-        "gpt-4o-mini-search-preview",
-        "gpt-4-turbo"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "gpt-4.1",
-        system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome"
-    ):
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.session = Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.agent = LitAgent()
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-        self.headers = {
-            "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "origin": "https://gg.is-a-furry.dev",
-            "referer": "https://gg.is-a-furry.dev/",
-            "user-agent": self.fingerprint["user_agent"]
-        }
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def refresh_identity(self, browser: str = None):
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-        self.headers.update({
-            "accept-language": self.fingerprint["accept_language"],
-            "user-agent": self.fingerprint["user_agent"]
-        })
-        self.session.headers.update(self.headers)
-        return self.fingerprint
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a prompt to the gg.is-a-furry.dev API and returns the response.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            raw (bool): Whether to return the raw response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Dict[str, Any]: The API response.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "model": self.model,
-            "max_tokens": self.max_tokens_to_sample
-        }
-
-        def for_stream():
-            try:
-                response = self.session.post(
-                    self.api_endpoint,
-                    headers=self.headers,
-                    json=payload,
-                    stream=True,
-                    timeout=self.timeout
-                )
-                if not response.ok:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                    )
-                streaming_response = ""
-                # Use sanitize_stream with regex-based extraction and filtering (like x0gpt)
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
-                    intro_value=None,
-                    to_json=False,
-                    extract_regexes=[r'0:"(.*?)"'],
-                    skip_regexes=[
-                        r'^f:',
-                        r'^e:',
-                        r'^d:',
-                        r'^\s*$',
-                        r'data:\s*\[DONE\]',
-                        r'event:\s*',
-                        r'^\d+:\s*$',
-                        r'^:\s*$',
-                        r'^\s*[\x00-\x1f]+\s*$',
-                    ],
-                    raw=raw
-                )
-
-                for content_chunk in processed_stream:
-                    if isinstance(content_chunk, bytes):
-                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
-                    if raw:
-                        yield content_chunk
-                    else:
-                        if content_chunk and isinstance(content_chunk, str):
-                            try:
-                                clean_content = content_chunk.encode().decode('unicode_escape')
-                                clean_content = clean_content.replace('\\\\', '\\').replace('\\"', '"')
-                                streaming_response += clean_content
-                                yield dict(text=clean_content)
-                            except (UnicodeDecodeError, UnicodeEncodeError):
-                                streaming_response += content_chunk
-                                yield dict(text=content_chunk)
-
-                self.last_response.update(dict(text=streaming_response))
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-        def for_non_stream():
-            if stream:
-                return for_stream()
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        raw: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """
-        Generates a response from the MonoChat API.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-            raw (bool): Whether to return raw response chunks.
-
-        Returns:
-            str: The API response.
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
-            ):
-                if raw:
-                    yield response
-                else:
-                    yield self.get_message(response)
-
-        def for_non_stream():
-            result = self.ask(
-                prompt,
-                False,
-                raw=raw,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-            if raw:
-                return result
-            else:
-                return self.get_message(result)
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """
-        Extracts the message from the API response.
-
-        Args:
-            response (dict): The API response.
-
-        Returns:
-            str: The message content.
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        text = response.get("text", "")
-        return text
-
-if __name__ == "__main__":
-    from rich import print
-    ai = MonoChat(timeout=60)
-    response = ai.chat("In points tell me about humans", stream=True, raw=False)
-    for chunk in response:
-        print(chunk, end="", flush=True)
webscout/Provider/multichat.py
DELETED
@@ -1,375 +0,0 @@
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-import json
-import uuid
-from typing import Any, Dict, Union, Generator
-from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-# Model configurations
-MODEL_CONFIGS = {
-    "llama": {
-        "endpoint": "https://www.multichatai.com/api/chat/meta",
-        "models": {
-            "llama-3.3-70b-versatile": {"contextLength": 131072},
-            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
-            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
-        },
-    },
-    "cohere": {
-        "endpoint": "https://www.multichatai.com/api/chat/cohere",
-        "models": {
-            "command-r": {"contextLength": 128000},
-            "command": {"contextLength": 4096},
-        },
-    },
-    "google": {
-        "endpoint": "https://www.multichatai.com/api/chat/google",
-        "models": {
-            "gemini-1.5-flash-002": {"contextLength": 1048576},
-            "gemma2-9b-it": {"contextLength": 8192},
-            "gemini-2.0-flash": {"contextLength": 128000},
-        },
-        "message_format": "parts",
-    },
-    "deepinfra": {
-        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
-        "models": {
-            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
-            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
-            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
-            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
-            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
-            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
-            "gemma-2-27b-it": {"contextLength": 8192},
-        },
-    },
-    "mistral": {
-        "endpoint": "https://www.multichatai.com/api/chat/mistral",
-        "models": {
-            "mistral-small-latest": {"contextLength": 32000},
-            "codestral-latest": {"contextLength": 32000},
-            "open-mistral-7b": {"contextLength": 8000},
-            "open-mixtral-8x7b": {"contextLength": 8000},
-        },
-    },
-    "alibaba": {
-        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
-        "models": {
-            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
-            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
-            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
-        },
-    },
-}
-
-class MultiChatAI(Provider):
-    """
-    A class to interact with the MultiChatAI API.
-    """
-    AVAILABLE_MODELS = [
-        # Llama Models
-        "llama-3.3-70b-versatile",
-        "llama-3.2-11b-vision-preview",
-        "deepseek-r1-distill-llama-70b",
-
-        # Cohere Models
-        # "command-r", # >>>> NOT WORKING
-        # "command", # >>>> NOT WORKING
-
-        # Google Models
-        # "gemini-1.5-flash-002", #>>>> NOT WORKING
-        "gemma2-9b-it",
-        "gemini-2.0-flash",
-
-        # DeepInfra Models
-        "Sao10K/L3.1-70B-Euryale-v2.2",
-        "Gryphe/MythoMax-L2-13b",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "deepseek-ai/DeepSeek-V3",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
-        "NousResearch/Hermes-3-Llama-3.1-405B",
-        # "gemma-2-27b-it", # >>>> NOT WORKING
-
-        # Mistral Models
-        # "mistral-small-latest", # >>>> NOT WORKING
-        # "codestral-latest", # >>>> NOT WORKING
-        # "open-mistral-7b", # >>>> NOT WORKING
-        # "open-mixtral-8x7b", # >>>> NOT WORKING
-
-        # Alibaba Models
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/QwQ-32B-Preview"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 4000, # Note: max_tokens is not directly used by this API
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.3-70b-versatile",
-        system_prompt: str = "You are a friendly, helpful AI assistant.",
-        temperature: float = 0.5,
-        presence_penalty: int = 0, # Note: presence_penalty is not used by this API
-        frequency_penalty: int = 0, # Note: frequency_penalty is not used by this API
-        top_p: float = 1 # Note: top_p is not used by this API
-    ):
-        """Initializes the MultiChatAI API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-
-        # Initialize LitAgent for user agent generation (keep if needed for other headers)
-        self.agent = LitAgent()
-
-        self.headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "text/plain;charset=UTF-8", # Keep content-type
-            "origin": "https://www.multichatai.com",
-            "referer": "https://www.multichatai.com/",
-            "user-agent": self.agent.random(),
-            # Add sec-ch-ua headers if needed for impersonation consistency
-        }
-
-        # Update curl_cffi session headers, proxies, and cookies
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies # Assign proxies directly
-        # Set cookies on the session object for curl_cffi
-        self.session.cookies.set("session", uuid.uuid4().hex)
-
-        self.__available_optimizers = (
-            method for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-        self.provider = self._get_provider_from_model(self.model)
-        self.model_name = self.model
-
-    def _get_endpoint(self) -> str:
-        """Get the API endpoint for the current provider."""
-        return MODEL_CONFIGS[self.provider]["endpoint"]
-
-    def _get_chat_settings(self) -> Dict[str, Any]:
-        """Get chat settings for the current model."""
-        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
-        return {
-            "model": self.model,
-            "prompt": self.system_prompt,
-            "temperature": self.temperature,
-            "contextLength": base_settings["contextLength"],
-            "includeProfileContext": True,
-            "includeWorkspaceInstructions": True,
-            "embeddingsProvider": "openai"
-        }
-
-    def _get_system_message(self) -> str:
-        """Generate system message with current date."""
-        current_date = datetime.now().strftime("%d/%m/%Y")
-        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
-
-    def _build_messages(self, conversation_prompt: str) -> list:
-        """Build messages array based on provider type."""
-        if self.provider == "google":
-            return [
-                {"role": "user", "parts": self._get_system_message()},
-                {"role": "model", "parts": "I will follow your instructions."},
-                {"role": "user", "parts": conversation_prompt}
-            ]
-        else:
-            return [
-                {"role": "system", "content": self._get_system_message()},
-                {"role": "user", "content": conversation_prompt}
-            ]
-
-    def _get_provider_from_model(self, model: str) -> str:
-        """Determine the provider based on the model name."""
-        for provider, config in MODEL_CONFIGS.items():
-            if model in config["models"]:
-                return provider
-
-        available_models = []
-        for provider, config in MODEL_CONFIGS.items():
-            for model_name in config["models"].keys():
-                available_models.append(f"{provider}/{model_name}")
-
-        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
-        raise ValueError(error_msg)
-
-    def _make_request(self, payload: Dict[str, Any]) -> Any:
-        """Make the API request with proper error handling."""
-        try:
-            # Use curl_cffi session post with impersonate
-            # Cookies are handled by the session
-            response = self.session.post(
-                self._get_endpoint(),
-                # headers are set on the session
-                json=payload,
-                timeout=self.timeout,
-                # proxies are set on the session
-                impersonate="chrome110" # Use a common impersonation profile
-            )
-            response.raise_for_status() # Check for HTTP errors
-            return response
-        except CurlError as e: # Catch CurlError
-            raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
-        except Exception as e: # Catch other potential exceptions (like HTTPError)
-            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-            raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
-
-    def ask(
-        self,
-        prompt: str,
-        raw: bool = False, # Keep raw param for interface consistency
-        optimizer: str = None,
-        conversationally: bool = False,
-        stream: bool = False
-    ) -> Union[Dict[str, Any], str, Generator[str, None, None]]:
-        """Sends a prompt to the MultiChatAI API and returns the response. Supports raw output and direct text streaming."""
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-            else:
-                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
-
-        payload = {
-            "chatSettings": self._get_chat_settings(),
-            "messages": self._build_messages(conversation_prompt),
-            "customModelId": "",
-        }
-
-        response = self._make_request(payload)
-        try:
-            response_text_raw = response.text
-            if stream:
-                chunk_size = 64
-                text = response_text_raw
-                for i in range(0, len(text), chunk_size):
-                    chunk = text[i:i+chunk_size]
-                    if raw:
-                        yield chunk
-                    else:
-                        yield {"text": chunk}
-                self.last_response = {"text": text}
-                self.conversation.update_chat_history(prompt, text)
-            else:
-                processed_stream = sanitize_stream(
-                    data=response_text_raw,
-                    intro_value=None,
-                    to_json=False,
-                    raw=raw
-                )
-                full_response = "".join(list(processed_stream)).strip()
-                self.last_response = {"text": full_response}
-                self.conversation.update_chat_history(prompt, full_response)
-                return full_response if raw else self.last_response
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Failed to process response: {e}") from e
-
-    def chat(
-        self,
-        prompt: str,
-        optimizer: str = None,
-        conversationally: bool = False,
-        stream: bool = False,
-        raw: bool = False
-    ) -> Union[str, Generator[str, None, None]]:
-        """Generate response. Supports raw output and streaming."""
-        if stream:
-            # Streaming mode: yield chunks from ask
-            return self.ask(
-                prompt,
-                raw=raw,
-                optimizer=optimizer,
-                conversationally=conversationally,
-                stream=True
-            )
-        else:
-            # Non-streaming mode: return full message
-            response_data = self.ask(
-                prompt,
-                raw=raw,
-                optimizer=optimizer,
-                conversationally=conversationally,
-                stream=False
-            )
-            if raw:
-                return response_data if isinstance(response_data, str) else self.get_message(response_data)
-            else:
-                return self.get_message(response_data)
-
-    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
-        if isinstance(response, dict):
-            return response.get("text", "")
-        return str(response)
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(MultiChatAI.AVAILABLE_MODELS)
-
-    for model in MultiChatAI.AVAILABLE_MODELS:
-        try:
-            test_ai = MultiChatAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")