webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py
CHANGED
@@ -1,34 +1,101 @@
-
 import re
+
+
+# Import trio before curl_cffi to prevent eventlet socket monkey-patching conflicts
+# See: https://github.com/python-trio/trio/issues/3015
+try:
+    import trio # noqa: F401
+except ImportError:
+    pass # trio is optional, ignore if not available
+import json
+from typing import Any, Dict, Generator, List, Optional, Union
+
 import curl_cffi
 from curl_cffi.requests import Session
-
-import os
-from typing import Any, Dict, Optional, Generator, List, Union
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
-from webscout.AIbase import Provider
+
 from webscout import exceptions
+from webscout.AIbase import Provider
+from webscout.AIutel import ( # Import sanitize_stream
+    AwesomePrompts,
+    Conversation,
+    Optimizers,
+    sanitize_stream,
+)
 from webscout.litagent import LitAgent as UserAgent
 
+
 class Cerebras(Provider):
     """
     A class to interact with the Cerebras API using a cookie for authentication.
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
-        "
+        "qwen-3-coder-480b",
+        "qwen-3-235b-a22b-instruct-2507",
+        "qwen-3-235b-a22b-thinking-2507",
+        "qwen-3-32b",
         "llama-3.3-70b",
-        "
+        "llama-4-maverick-17b-128e-instruct",
+        "gpt-oss-120b",
         "llama-4-scout-17b-16e-instruct",
-        "
+        "llama3.1-8b"
+    ]
 
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Cerebras API.
+
+        Args:
+            api_key (str, optional): Cerebras API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.cerebras.ai/v1/models",
+                headers=headers,
+                impersonate="chrome120"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model['id'] for model in data['data']]
+            return cls.AVAILABLE_MODELS
+
+        except Exception:
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
 
-
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Cerebras API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
 
     def __init__(
         self,
+        cookie_path: str = None,
         is_conversation: bool = True,
-        max_tokens: int =
+        max_tokens: int = 40000,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -36,31 +103,47 @@ class Cerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
-        model: str = "
+        api_key: str = None,
+        model: str = "qwen-3-coder-480b",
         system_prompt: str = "You are a helpful assistant.",
+        temperature: float = 0.7,
+        top_p: float = 0.8,
     ):
-        # Validate model choice
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(
-                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
-            )
-
         # Initialize basic settings first
         self.timeout = timeout
         self.model = model
         self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+        self.temperature = temperature
+        self.top_p = top_p
         self.last_response = {}
 
         self.session = Session() # Initialize curl_cffi session
 
-        #
-
-        self.api_key =
-
-
+        # Handle API key - either provided directly or retrieved from cookies
+        if api_key:
+            self.api_key = api_key.strip()
+            # Basic validation for API key format
+            if not self.api_key or len(self.api_key) < 10:
+                raise ValueError("Invalid API key format. API key must be at least 10 characters long.")
+        elif cookie_path:
+            # Get API key from cookies
+            try:
+                self.api_key = self.get_demo_api_key(cookie_path)
+            except Exception as e:
+                raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+        else:
+            raise ValueError("Either api_key must be provided or cookie_path must be specified")
+
+        # Update available models from API
+        self.update_available_models(self.api_key)
+
+        # Validate model choice after updating models
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(
+                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+            )
 
         # Initialize optimizers
         self.__available_optimizers = (
@@ -72,19 +155,69 @@ class Cerebras(Provider):
         # Initialize conversation settings
        Conversation.intro = (
             AwesomePrompts().get_act(
-                act, raise_not_found=True, default=
+                act, raise_not_found=True, default="You are a helpful assistant.", case_insensitive=True
             )
             if act
-            else
+            else "You are a helpful assistant."
         )
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+
         # Apply proxies to the session
         self.session.proxies = proxies
 
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Cerebras API.
+
+        Args:
+            api_key (str, optional): Cerebras API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.cerebras.ai/v1/models",
+                headers=headers,
+                impersonate="chrome120"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model['id'] for model in data['data']]
+            return cls.AVAILABLE_MODELS
+
+        except Exception:
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Cerebras API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
+
     # Rest of the class implementation remains the same...
     @staticmethod
     def extract_query(text: str) -> str:
@@ -105,8 +238,10 @@
             return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
         return None
 
-    def get_demo_api_key(self, cookie_path: str) -> str: # Keep this using requests or switch to curl_cffi
+    def get_demo_api_key(self, cookie_path: str = None) -> str: # Keep this using requests or switch to curl_cffi
         """Retrieves the demo API key using the provided cookie."""
+        if not cookie_path:
+            raise ValueError("cookie_path must be provided when using cookie-based authentication")
         try:
             with open(cookie_path, "r") as file:
                 cookies = {item["name"]: item["value"] for item in json.load(file)}
@@ -159,7 +294,10 @@
         payload = {
             "model": self.model,
             "messages": messages,
-            "stream": stream
+            "stream": stream,
+            "max_tokens": self.max_tokens_to_sample,
+            "temperature": self.temperature,
+            "top_p": self.top_p
         }
 
         try:
@@ -197,8 +335,26 @@
 
         except curl_cffi.CurlError as e:
             raise exceptions.APIConnectionError(f"Request failed (CurlError): {e}") from e
-        except Exception as e:
-
+        except Exception as e:
+            # Check if it's an HTTP error with status code
+            if hasattr(e, 'response') and hasattr(e.response, 'status_code'):
+                status_code = e.response.status_code
+                if status_code == 401:
+                    raise exceptions.APIConnectionError(
+                        "Authentication failed (401): Invalid API key. Please check your API key and try again."
+                    ) from e
+                elif status_code == 403:
+                    raise exceptions.APIConnectionError(
+                        "Access forbidden (403): Your API key may not have permission to access this resource."
+                    ) from e
+                elif status_code == 429:
+                    raise exceptions.APIConnectionError(
+                        "Rate limit exceeded (429): Too many requests. Please wait and try again."
+                    ) from e
+                else:
+                    raise exceptions.APIConnectionError(f"HTTP {status_code} error: {e}") from e
+            else:
+                raise exceptions.APIConnectionError(f"Request failed: {e}") from e
 
     def ask(
         self,
@@ -225,7 +381,7 @@
 
         try:
             response = self._make_request(messages, stream)
-
+
             if stream:
                 # Wrap the generator to yield dicts or raw strings
                 def stream_wrapper():
@@ -256,7 +412,7 @@
         """Chat with the model."""
         # Ask returns a generator for stream=True, dict/str for stream=False
         response_gen_or_dict = self.ask(prompt, stream, raw=False, optimizer=optimizer, conversationally=conversationally)
-
+
         if stream:
             # Wrap the generator from ask() to get message text
             def stream_wrapper():
@@ -276,14 +432,14 @@
 
 if __name__ == "__main__":
     from rich import print
-
+
     # Example usage
     cerebras = Cerebras(
-
-        model='
+        api_key='csk-**********************', # Replace with your actual API key
+        model='qwen-3-235b-a22b-instruct-2507',
         system_prompt="You are a helpful AI assistant."
     )
-
+
     # Test with streaming
     response = cerebras.chat("Hello!", stream=True)
     for chunk in response: