webscout 8.2.3-py3-none-any.whl → 8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/multichat.py ADDED

@@ -0,0 +1,368 @@
+import time
+import uuid
+import json
+from datetime import datetime
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage,
+    format_prompt
+)
+
+# Import curl_cffi for Cloudflare bypass
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import LitAgent for user agent generation
+from webscout.litagent import LitAgent
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+# Model configurations
+MODEL_CONFIGS = {
+    "llama": {
+        "endpoint": "https://www.multichatai.com/api/chat/meta",
+        "models": {
+            "llama-3.3-70b-versatile": {"contextLength": 131072},
+            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
+            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
+        },
+    },
+    "cohere": {
+        "endpoint": "https://www.multichatai.com/api/chat/cohere",
+        "models": {
+            "command-r": {"contextLength": 128000},
+            "command": {"contextLength": 4096},
+        },
+    },
+    "google": {
+        "endpoint": "https://www.multichatai.com/api/chat/google",
+        "models": {
+            "gemini-1.5-flash-002": {"contextLength": 1048576},
+            "gemma2-9b-it": {"contextLength": 8192},
+            "gemini-2.0-flash": {"contextLength": 128000},
+        },
+        "message_format": "parts",
+    },
+    "deepinfra": {
+        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
+        "models": {
+            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
+            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
+            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
+            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
+            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
+            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
+            "gemma-2-27b-it": {"contextLength": 8192},
+        },
+    },
+    "mistral": {
+        "endpoint": "https://www.multichatai.com/api/chat/mistral",
+        "models": {
+            "mistral-small-latest": {"contextLength": 32000},
+            "codestral-latest": {"contextLength": 32000},
+            "open-mistral-7b": {"contextLength": 8000},
+            "open-mixtral-8x7b": {"contextLength": 8000},
+        },
+    },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
+        },
+    },
+}
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'MultiChatAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Create a chat completion using the MultiChatAI API.
+
+        Args:
+            model: The model to use
+            messages: A list of messages in the conversation
+            max_tokens: Maximum number of tokens to generate
+            stream: Whether to stream the response
+            temperature: Temperature for response generation
+            top_p: Top-p sampling parameter
+
+        Returns:
+            Either a ChatCompletion object or a generator of ChatCompletionChunk objects
+        """
+        try:
+            # Set client parameters based on function arguments
+            self._client.model = model
+            if temperature is not None:
+                self._client.temperature = temperature
+            if max_tokens is not None:
+                self._client.max_tokens_to_sample = max_tokens
+
+            # Extract system messages and set as system prompt
+            for message in messages:
+                if message.get("role") == "system":
+                    self._client.system_prompt = message.get("content", "")
+                    break
+
+            # Format all messages into a single prompt
+            user_message = format_prompt(messages)
+
+            # Generate a unique request ID
+            request_id = f"multichat-{str(uuid.uuid4())}"
+            created_time = int(time.time())
+
+            # Make the API request
+            response_text = self._client._make_api_request(user_message)
+
+            # If streaming is requested, simulate streaming with the full response
+            if stream:
+                def generate_chunks():
+                    # Create a single chunk with the full response
+                    delta = ChoiceDelta(content=response_text)
+                    choice = Choice(index=0, delta=delta, finish_reason="stop")
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                    )
+                    yield chunk
+
+                return generate_chunks()
+
+            # For non-streaming, create a complete response
+            message = ChatCompletionMessage(role="assistant", content=response_text)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+
+            # Estimate token usage (this is approximate)
+            prompt_tokens = len(user_message) // 4  # Rough estimate
+            completion_tokens = len(response_text) // 4  # Rough estimate
+            total_tokens = prompt_tokens + completion_tokens
+
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during MultiChatAI request: {e}{RESET}")
+            raise IOError(f"MultiChatAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'MultiChatAI'):
+        self.completions = Completions(client)
+
+class MultiChatAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for MultiChatAI API.
+
+    Usage:
+        client = MultiChatAI()
+        response = client.chat.completions.create(
+            model="llama-3.3-70b-versatile",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        # Llama Models
+        "llama-3.3-70b-versatile",
+        "llama-3.2-11b-vision-preview",
+        "deepseek-r1-distill-llama-70b",
+
+        # Google Models
+        "gemma2-9b-it",
+        "gemini-2.0-flash",
+
+        # DeepInfra Models
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Gryphe/MythoMax-L2-13b",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "deepseek-ai/DeepSeek-V3",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+
+        # Alibaba Models
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/QwQ-32B-Preview"
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {},
+        model: str = "llama-3.3-70b-versatile",
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        temperature: float = 0.5,
+        max_tokens: int = 4000
+    ):
+        """
+        Initialize the MultiChatAI client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+            model: Default model to use
+            system_prompt: System prompt to use
+            temperature: Temperature for response generation
+            max_tokens: Maximum number of tokens to generate
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.timeout = timeout
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.max_tokens_to_sample = max_tokens
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "text/plain;charset=UTF-8",
+            "origin": "https://www.multichatai.com",
+            "referer": "https://www.multichatai.com/",
+            "user-agent": self.agent.random(),
+        }
+
+        # Update curl_cffi session headers, proxies, and cookies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.session.cookies.set("session", uuid.uuid4().hex)
+
+        # Initialize the provider based on the model
+        self.provider = self._get_provider_from_model(self.model)
+        self.model_name = self.model
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_chat_settings(self) -> Dict[str, Any]:
+        """Get chat settings for the current model."""
+        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
+        return {
+            "model": self.model,
+            "prompt": self.system_prompt,
+            "temperature": self.temperature,
+            "contextLength": base_settings["contextLength"],
+            "includeProfileContext": True,
+            "includeWorkspaceInstructions": True,
+            "embeddingsProvider": "openai"
+        }
+
+    def _get_system_message(self) -> str:
+        """Generate system message with current date."""
+        current_date = datetime.now().strftime("%d/%m/%Y")
+        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
+
+    def _build_messages(self, conversation_prompt: str) -> list:
+        """Build messages array based on provider type."""
+        if self.provider == "google":
+            return [
+                {"role": "user", "parts": self._get_system_message()},
+                {"role": "model", "parts": "I will follow your instructions."},
+                {"role": "user", "parts": conversation_prompt}
+            ]
+        else:
+            return [
+                {"role": "system", "content": self._get_system_message()},
+                {"role": "user", "content": conversation_prompt}
+            ]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        available_models = []
+        for provider, config in MODEL_CONFIGS.items():
+            for model_name in config["models"].keys():
+                available_models.append(f"{provider}/{model_name}")
+
+        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+        raise ValueError(error_msg)
+
+    def _make_api_request(self, prompt: str) -> str:
+        """Make the API request with proper error handling."""
+        try:
+            payload = {
+                "chatSettings": self._get_chat_settings(),
+                "messages": self._build_messages(prompt),
+                "customModelId": "",
+            }
+
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                self._get_endpoint(),
+                json=payload,
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+            response.raise_for_status()
+
+            # Return the response text
+            return response.text.strip()
+
+        except CurlError as e:
+            raise IOError(f"API request failed (CurlError): {e}") from e
+        except Exception as e:
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise IOError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+if __name__ == "__main__":
+    print(f"{BOLD}Testing MultiChatAI OpenAI-compatible provider{RESET}")
+
+    client = MultiChatAI()
+    response = client.chat.completions.create(
+        model="llama-3.3-70b-versatile",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Say 'Hello' in one word"}
+        ]
+    )
+
+    print(f"Response: {response.choices[0].message.content}")
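For orientation, a minimal usage sketch of the new OpenAI-compatible MultiChatAI client added above. The import path is inferred from the file location (webscout/Provider/OPENAI/multichat.py), and the model name and prompt are illustrative; with stream=True the client simulates streaming by yielding the full reply as a single chunk, as implemented in create().

    from webscout.Provider.OPENAI.multichat import MultiChatAI

    client = MultiChatAI(timeout=30)

    # stream=True returns a generator; per the implementation above it yields
    # one chunk carrying the complete response text.
    for chunk in client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": "Name one planet."}],
        stream=True,
    ):
        print(chunk.choices[0].delta.content, end="")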
webscout/Provider/OPENAI/netwrck.py CHANGED

@@ -203,12 +203,14 @@ class Netwrck(OpenAICompatibleProvider):
         "x-ai/grok-2",
         "anthropic/claude-3-7-sonnet-20250219",
         "sao10k/l3-euryale-70b",
-        "openai/gpt-
+        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
         "google/gemini-pro-1.5",
+        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
         "deepseek/deepseek-r1",
         "deepseek/deepseek-chat"
+
     ]
 
     # Default greeting used by Netwrck
webscout/Provider/OpenGPT.py CHANGED

@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Dict, Generator, Union
 
@@ -9,6 +10,7 @@ from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 
+
 class OpenGPT(Provider):
     """
     A class to interact with the Open-GPT API.
@@ -17,7 +19,7 @@ class OpenGPT(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -41,8 +43,9 @@ class OpenGPT(Provider):
             act (str, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             app_id (str, optional): The OpenGPT application ID. Defaults to "clf3yg8730000ih08ndbdi2v4".
         """
-
-        self.
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.agent = LitAgent() # Keep for potential future use or other headers
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -50,15 +53,17 @@ class OpenGPT(Provider):
         self.last_response = {}
         self.app_id = app_id
 
-        # Set up headers
+        # Set up headers (remove User-Agent if using impersonate)
         self.headers = {
             "Content-Type": "application/json",
-            "User-Agent": self.agent.random(),
-            "Referer": f"https://open-gpt.app/id/app/{app_id}"
+            # "User-Agent": self.agent.random(), # Removed, handled by impersonate
+            "Referer": f"https://open-gpt.app/id/app/{self.app_id}",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
 
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies # Assign proxies directly
 
         # Initialize optimizers
         self.__available_optimizers = (
@@ -82,7 +87,7 @@ class OpenGPT(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Note: API does not support streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -117,31 +122,34 @@ class OpenGPT(Provider):
             "userKey": "" # Assuming userKey is meant to be empty as in the original code
         }
 
+        # API does not stream, implement non-stream logic directly
        def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://open-gpt.app/api/generate",
-
-
+                    # headers are set on the session
+                    data=json.dumps(payload), # Keep data as JSON string
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
 
-                #
-                response.raise_for_status()
+                response.raise_for_status() # Check for HTTP errors
 
+                # Use response.text which is already decoded
                 response_text = response.text
                 self.last_response = {"text": response_text}
                 self.conversation.update_chat_history(prompt, response_text)
 
-
+                # Return dict or raw string based on raw flag
+                return {"raw": response_text} if raw else {"text": response_text}
 
-            except
-
-
-
-
-                # Catch any other unexpected errors
-                error_msg = f"An unexpected error occurred: {e}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
         # This provider doesn't support streaming, so just return non-stream
         return for_non_stream()
@@ -149,7 +157,7 @@ class OpenGPT(Provider):
     def chat(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Keep stream param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
@@ -165,10 +173,22 @@ class OpenGPT(Provider):
         Returns:
             A string with the response text.
         """
-
-
+        # Since ask() now handles both stream=True/False by returning the full response dict:
+        response_data = self.ask(
+            prompt,
+            stream=False, # Call ask in non-stream mode internally
+            raw=False, # Ensure ask returns dict with 'text' key
+            optimizer=optimizer,
+            conversationally=conversationally
         )
-
+        # If stream=True was requested, simulate streaming by yielding the full message at once
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            # If stream=False, return the full message directly
+            return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
         """
@@ -185,15 +205,5 @@ class OpenGPT(Provider):
 
 
 if __name__ == "__main__":
-
-    print("
-    print("Testing OpenGPT provider")
-    print("-" * 80)
-
-    try:
-        test_ai = OpenGPT()
-        response = test_ai.chat("Explain quantum physics simply.")
-        print(response)
-    except Exception as e:
-        print(f"Error: {e}")
-
+    ai = OpenGPT()
+    print(ai.chat("Hello, how are you?"))
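For reference, a short sketch of how the reworked OpenGPT.chat() behaves from the caller's side after this change; the import path is inferred from the file location and the prompts are illustrative. With stream=True it now returns a generator that yields the complete reply once, since the backend does not stream.

    from webscout.Provider.OpenGPT import OpenGPT

    ai = OpenGPT(timeout=30)

    # Non-streaming call: returns the reply as a plain string.
    print(ai.chat("Hello, how are you?"))

    # "Streaming" call: per the new wrapper, yields the full message in one piece.
    for part in ai.chat("Explain HTTP in one sentence.", stream=True):
        print(part)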