webscout 2025.10.17-py3-none-any.whl → 2025.10.19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/DeepAI.py +362 -0
- webscout/Provider/Gradient.py +231 -0
- webscout/Provider/OPENAI/DeepAI.py +300 -0
- webscout/Provider/TogetherAI.py +139 -199
- webscout/version.py +1 -1
- webscout/version.py.bak +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/METADATA +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/RECORD +12 -9
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/WHEEL +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/entry_points.txt +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/top_level.txt +0 -0
webscout/Provider/DeepAI.py
@@ -0,0 +1,362 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
from typing import Any, Dict, List, Optional, Union, Iterator

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent


class DeepAI(Provider):
    """
    DeepAI Chat Provider

    A provider for DeepAI's chat functionality, supporting both streaming and
    non-streaming responses. Structured similarly to other providers such as
    DeepInfra and X0GPT.
    """
    required_auth = False
    AVAILABLE_MODELS = [
        "standard",
        "genius",
        "online",
        "supergenius",
        "onlinegenius",
        "deepseek-v3.2",
        "gemini-2.5-flash-lite",
        "qwen3-30b-a3b",
        "gpt-5-nano",
        "gpt-oss-120b",
        "gpt-5-chat-latest",
        "claude-opus-4-1",
        "llama-4-scout",
        "claude-4.5-sonnet",
        "deepseek-v3.1-terminus",
        "llama-3.3-70b-instruct",
        "grok-4",
        "claude-sonnet-4",
        "qwen3-coder",
        "gpt-5",
        "kimi-k2-0905",
        "claude-opus-4",
        "gpt-5-mini",
        "gemini-2.5-pro",
        "grok-code-fast-1",
        "gpt-4.1",
    ]

    def __init__(
        self,
        api_key: str = "tryit-53926507126-2c8a2543c7b5638ca6b92b6e53ef2d2b",
        timeout: int = 30,
        proxies: Optional[Dict[str, str]] = None,
        model: str = "standard",
        chat_style: str = "chat",
        enabled_tools: Optional[List[str]] = None,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        history_offset: int = 10250,
        act: Optional[str] = None,
        system_prompt: str = "You are a helpful assistant.",
        browser: str = "chrome",
        **kwargs
    ):
        """
        Initialize the DeepAI provider.

        Args:
            api_key: API key for authentication (trial key provided by default)
            timeout: Request timeout in seconds
            proxies: Proxy configuration
            model: Model to use (default: "standard")
            chat_style: Chat style (default: "chat")
            enabled_tools: List of enabled tools (default: ["image_generator"])
            is_conversation: Whether to maintain conversation history
            max_tokens: Maximum tokens for conversation
            intro: Introduction prompt
            filepath: Path to conversation history file
            update_file: Whether to update history file
            history_offset: History offset for truncation
            act: Act prompt from AwesomePrompts
            system_prompt: System prompt for the AI
            browser: Browser type for fingerprinting
            **kwargs: Additional arguments
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://api.deepai.org/hacking_is_a_serious_crime"
        self.api_key = api_key
        self.proxies = proxies or {}
        self.model = model
        self.chat_style = chat_style
        self.enabled_tools = enabled_tools or ["image_generator"]
        self.system_prompt = system_prompt
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        # LitAgent for browser fingerprinting
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Headers mirror a real browser session against deepai.org
        self.headers = {
            "User-Agent": self.fingerprint.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"),
            "Accept": "*/*",
            "Accept-Language": self.fingerprint.get("accept_language", "en-US,en;q=0.9"),
            "Origin": "https://deepai.org",
            "Referer": "https://deepai.org/",
            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"'),
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "Windows")}"',
            "api-key": self.api_key
        }

        # Setup session
        self.session = Session()
        self.session.headers.update(self.headers)
        self.session.proxies = self.proxies

        # Optimizers, materialized as a tuple so membership checks can be
        # repeated (a bare generator expression would be exhausted after the
        # first `in` test).
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Conversation setup similar to other providers
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: Optional[str] = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update the fingerprint-dependent headers
        self.headers.update({
            "User-Agent": self.fingerprint.get("user_agent"),
            "Accept-Language": self.fingerprint.get("accept_language"),
            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua"),
            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform")}"',
        })

        self.session.headers.update(self.headers)
        return self.fingerprint

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        **kwargs
    ) -> Union[Dict[str, Any], str, Iterator[Dict[str, Any]]]:
        """
        Send a prompt to DeepAI and get the response.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response (fake streaming: yields the full response in one chunk)
            raw: Whether to return the raw response text
            optimizer: Optimizer to use
            conversationally: Whether to apply the optimizer to the full conversation
            **kwargs: Additional arguments

        Returns:
            Response dictionary (or raw string), or a generator when streaming
        """
        # Build the complete conversation prompt, as in the other providers
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)

        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")

        # Prepare form data; the conversation prompt is sent as the user
        # message in chatHistory.
        chat_history = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt}
        ]
        data = {
            "chat_style": self.chat_style,
            "chatHistory": json.dumps(chat_history),
            "model": self.model,
            "hacker_is_stinky": "very_stinky",
            "enabled_tools": json.dumps(self.enabled_tools)
        }

        # The upstream endpoint is always non-streaming
        try:
            response = self.session.post(
                self.url,
                data=data,
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            result = response.text.strip()

            # Update last response and conversation history
            self.last_response = {"text": result}
            self.conversation.update_chat_history(prompt, result)

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"DeepAI API request failed (CurlError): {str(e)}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"DeepAI API request failed ({type(e).__name__}): {str(e)}")

        if stream:
            # Fake streaming: yield the full response in one chunk. An inner
            # generator is used so that ask() itself stays an ordinary
            # function; a bare `yield` in the method body would turn every
            # call into a generator and silently discard the non-streaming
            # `return` value below.
            def fake_stream():
                yield result if raw else self.last_response
            return fake_stream()
        return self.last_response if not raw else result

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
        raw: bool = False,
        **kwargs
    ) -> Union[str, Iterator[str]]:
        """
        Send a chat message to DeepAI and get the response.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response (fake streaming: yields the full response in one chunk)
            optimizer: Optimizer to use
            conversationally: Whether to apply the optimizer to the full conversation
            raw: Whether to return the raw response
            **kwargs: Additional arguments

        Returns:
            The AI response as a string, or a generator when streaming
        """
        # As in ask(), the streaming path lives in an inner generator so the
        # non-streaming path can return a plain string.
        def stream_chat():
            for resp in self.ask(
                prompt=prompt,
                stream=True,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
                **kwargs
            ):
                yield resp if raw else self.get_message(resp)

        if stream:
            return stream_chat()

        response = self.ask(
            prompt=prompt,
            stream=False,
            raw=raw,
            optimizer=optimizer,
            conversationally=conversationally,
            **kwargs
        )
        return response if raw else self.get_message(response)

    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
        """
        Extract the message from the response.

        Args:
            response: Response dictionary from the ask method, or a string if raw

        Returns:
            The message text
        """
        if isinstance(response, dict):
            return response.get("text", "")
        elif isinstance(response, str):
            return response
        else:
            raise ValueError(f"Unexpected response type: {type(response)}")

    @classmethod
    def get_models(cls) -> List[str]:
        """
        Get available models.

        Returns:
            List of available model names
        """
        return cls.AVAILABLE_MODELS

    @classmethod
    def get_chat_styles(cls) -> List[str]:
        """
        Get available chat styles.

        Returns:
            List of available chat styles
        """
        return ["chat"]


if __name__ == "__main__":
    # Smoke test similar to other providers, using stream=True for consistency
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in DeepAI.AVAILABLE_MODELS:
        try:
            test_ai = DeepAI(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            for chunk in response:
                response_text += chunk

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Clean and truncate the response for display
                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
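For orientation, a minimal usage sketch of the new DeepAI provider follows. It is not part of the release itself: the prompts are hypothetical, and it assumes only the defaults visible in the file above (the bundled trial api_key and the "standard" model).

# Minimal usage sketch for the new DeepAI provider (assumes the defaults above).
from webscout.Provider.DeepAI import DeepAI

ai = DeepAI(model="standard", timeout=60)

# Non-streaming: chat() returns the full reply as a string.
print(ai.chat("Say 'Hello' in one word"))

# "Streaming": as the docstrings note, DeepAI fakes streaming and yields
# the whole reply as a single chunk.
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)

The stream=True path is cosmetic here; it exists so callers can treat DeepAI interchangeably with providers that stream for real, such as Gradient below.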
webscout/Provider/Gradient.py
@@ -0,0 +1,231 @@
"""
Gradient Network Chat API Provider
Reverse engineered from https://chat.gradient.network/
"""

from curl_cffi.requests import Session
from curl_cffi import CurlError
from typing import Optional, Generator, Dict, Any, Union

from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent


class Gradient(Provider):
    """
    Provider for the Gradient Network chat API.
    Supports real-time streaming responses from distributed GPU clusters.
    """

    required_auth = False
    AVAILABLE_MODELS = [
        "GPT-OSS-120B",
        "Qwen3-235B",
    ]

    def __init__(
        self,
        model: str = "GPT-OSS-120B",
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: Optional[str] = None,
        system_prompt: str = "You are a helpful assistant.",
        cluster_mode: str = "nvidia",
        enable_thinking: bool = True,
    ):
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.model = model
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        # None default avoids the mutable-default-argument pitfall
        self.proxies = proxies or {}
        self.system_prompt = system_prompt
        self.cluster_mode = cluster_mode
        self.enable_thinking = enable_thinking
        self.last_response = {}

        self.session = Session()
        self.session.proxies = self.proxies

        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint("chrome")

        self.headers = {
            "User-Agent": self.fingerprint.get("user_agent", ""),
            "Accept": "*/*",
            "Accept-Language": self.fingerprint.get("accept_language", ""),
            "Content-Type": "application/json",
            "Origin": "https://chat.gradient.network",
            "Referer": "https://chat.gradient.network/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        self.session.headers.update(self.headers)

        # Optimizers, materialized as a tuple so membership checks are repeatable
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt},
        ]

        payload = {
            "model": self.model,
            "clusterMode": self.cluster_mode,
            "messages": messages,
            "enableThinking": self.enable_thinking,
        }

        def for_stream():
            streaming_text = ""
            try:
                response = self.session.post(
                    "https://chat.gradient.network/api/generate",
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110",
                )
                response.raise_for_status()

                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value=None,
                    to_json=True,
                    skip_markers=[],
                    content_extractor=self._Gradient_extractor,
                    yield_raw_on_error=False,
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
            finally:
                if streaming_text:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            try:
                full_response = ""
                for chunk in for_stream():
                    full_response += self.get_message(chunk)

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return self.last_response if not raw else full_response

            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream_chat():
            gen = self.ask(
                prompt, stream=True, raw=False,
                optimizer=optimizer, conversationally=conversationally
            )
            for response_dict in gen:
                yield self.get_message(response_dict)

        def for_non_stream_chat():
            response_data = self.ask(
                prompt, stream=False, raw=False,
                optimizer=optimizer, conversationally=conversationally
            )
            return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")

    @staticmethod
    def _Gradient_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        # Each streamed JSON object carries a "type"; text is pulled from the
        # "reasoningContent" field of "reply" chunks.
        if isinstance(chunk, dict):
            chunk_type = chunk.get("type")
            if chunk_type == "reply":
                return chunk.get("data", {}).get("reasoningContent", "")
        return None


if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in Gradient.AVAILABLE_MODELS:
        try:
            test_ai = Gradient(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            for chunk in response:
                response_text += chunk

            if response_text and len(response_text.strip()) > 0:
                status = "v"
                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "x"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'x':<10} {str(e)}")
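A matching sketch for the new Gradient provider, again not part of the release: the prompt is hypothetical, and it assumes the defaults visible above (cluster_mode "nvidia", thinking enabled). Unlike DeepAI, Gradient streams incrementally, with each JSON chunk decoded through sanitize_stream and _Gradient_extractor.

# Minimal usage sketch for the new Gradient provider (assumes the defaults above).
from webscout.Provider.Gradient import Gradient

ai = Gradient(model="GPT-OSS-120B", timeout=60)

# Real streaming: chunks arrive as the cluster generates them.
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)

# Non-streaming: the same request, accumulated into one string internally.
print(ai.chat("Say 'Hello' in one word"))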