webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Groq.py
CHANGED
@@ -1,12 +1,15 @@
 from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
 
 import httpx
-import requests
 import json
 
+# Import curl_cffi for improved request handling
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
@@ -15,6 +18,7 @@ class GROQ(Provider):
     A class to interact with the GROQ AI API.
     """
 
+    # Default models list (will be updated dynamically)
     AVAILABLE_MODELS = [
         "distil-whisper-large-v3-en",
         "gemma2-9b-it",
@@ -42,6 +46,45 @@ class GROQ(Provider):
         "llama-3.2-90b-vision-preview",
         "mixtral-8x7b-32768"
     ]
+
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Groq API.
+
+        Args:
+            api_key (str, optional): Groq API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.groq.com/openai/v1/models",
+                headers=headers,
+                impersonate="chrome110" # Use impersonate for fetching
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
 
     def __init__(
         self,
@@ -82,10 +125,15 @@ class GROQ(Provider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
         """
+        # Update available models from API
+        self.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_key = api_key
@@ -110,7 +158,11 @@ class GROQ(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
+        # Update curl_cffi session headers
         self.session.headers.update(self.headers)
+
+        # Set up conversation
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -122,7 +174,28 @@ class GROQ(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+
+        # Set proxies for curl_cffi session
         self.session.proxies = proxies
+
+    @staticmethod
+    def _groq_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[Dict]:
+        """Extracts the 'delta' object from Groq stream JSON chunks."""
+        if isinstance(chunk, dict):
+            # Return the delta object itself, or None if not found
+            return chunk.get("choices", [{}])[0].get("delta")
+        return None
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Groq API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
 
     def add_function(self, function_name: str, function: Callable):
         """Add a function to the available functions dictionary.
@@ -183,32 +256,45 @@ class GROQ(Provider):
         }
 
         def for_stream():
-
-
-
-
-
-
+            try:
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use impersonate for better compatibility
                 )
+                if not response.status_code == 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        # Removed response.reason_phrase
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._groq_extractor, # Use the delta extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for delta in processed_stream:
+                    # delta is the extracted 'delta' object or None
+                    if delta and isinstance(delta, dict):
+                        content = delta.get("content")
+                        if content:
+                            streaming_text += content
+                            resp = {"text": content} # Yield only the new chunk text
+                            self.last_response = {"choices": [{"delta": {"content": streaming_text}}]} # Update last_response structure
+                            yield resp if not raw else content # Yield dict or raw string chunk
+                # Note: Tool calls in streaming delta are less common in OpenAI format, usually in final message
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
             # Handle tool calls if any
             if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
@@ -226,32 +312,71 @@ class GROQ(Provider):
                     })
                 payload['messages'] = messages
                 # Make a second call to get the final response
-
-
-
-
-
-
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to execute tool - {second_response.text}"
+                try:
+                    second_response = self.session.post(
+                        self.chat_endpoint,
+                        json=payload,
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use impersonate for better compatibility
                     )
+                    if second_response.status_code == 200:
+                        self.last_response = second_response.json()
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Failed to execute tool - {second_response.text}"
+                        )
+                except CurlError as e:
+                    raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
+                except Exception as e:
+                    raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
 
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
            )
 
         def for_non_stream():
-
-
-
-
-
-
-
-            raise exceptions.FailedToGenerateResponseError(
-                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            try:
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=False,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use impersonate for better compatibility
                 )
-
+                if (
+                    not response.status_code == 200
+                ):
+                    raise exceptions.FailedToGenerateResponseError(
+                        # Removed response.reason_phrase
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
+
+                response_text = response.text # Get raw text
+
+                # Use sanitize_stream to parse the non-streaming JSON response
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    # Extractor for non-stream structure (returns the whole parsed dict)
+                    content_extractor=lambda chunk: chunk if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
+                )
+
+                # Extract the single result (the parsed JSON dictionary)
+                resp = next(processed_stream, None)
+                if resp is None:
+                    raise exceptions.FailedToGenerateResponseError("Failed to parse non-stream JSON response")
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+            except Exception as e:
+                # Catch the original AttributeError here if it happens before the raise
+                if isinstance(e, AttributeError) and 'reason_phrase' in str(e):
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
+                raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
             # Handle tool calls if any
             if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
@@ -269,15 +394,23 @@ class GROQ(Provider):
                    })
                payload['messages'] = messages
                # Make a second call to get the final response
-
-
-
-
-
-
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to execute tool - {second_response.text}"
+                try:
+                    second_response = self.session.post(
+                        self.chat_endpoint,
+                        json=payload,
+                        timeout=self.timeout,
+                        impersonate="chrome110" # Use impersonate for better compatibility
                     )
+                    if second_response.status_code == 200:
+                        resp = second_response.json()
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Failed to execute tool - {second_response.text}"
+                        )
+                except CurlError as e:
+                    raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
+                except Exception as e:
+                    raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
 
             self.last_response.update(resp)
             self.conversation.update_chat_history(
@@ -287,7 +420,6 @@ class GROQ(Provider):
 
         return for_stream() if stream else for_non_stream()
 
-
     def chat(
         self,
         prompt: str,
@@ -337,11 +469,16 @@ class GROQ(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         try:
-
+            # Check delta first for streaming
+            if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
                 return response["choices"][0]["delta"]["content"]
-
-
-
+            # Check message content for non-streaming or final message
+            if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
+                return response["choices"][0]["message"]["content"]
+        except (KeyError, IndexError, TypeError):
+            # Handle cases where the structure might be different or content is null/missing
+            pass
+        return "" # Return empty string if no content found
 
 
 class AsyncGROQ(AsyncProvider):
@@ -349,33 +486,8 @@ class AsyncGROQ(AsyncProvider):
     An asynchronous class to interact with the GROQ AI API.
     """
 
-    AVAILABLE_MODELS = [
-        "distil-whisper-large-v3-en",
-        "gemma2-9b-it",
-        "llama-3.3-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "whisper-large-v3",
-        "whisper-large-v3-turbo",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-        "meta-llama/llama-4-maverick-17b-128e-instruct",
-        "playai-tts",
-        "playai-tts-arabic",
-        "qwen-qwq-32b",
-        "mistral-saba-24b",
-        "qwen-2.5-coder-32b",
-        "qwen-2.5-32b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-llama-70b",
-        "llama-3.3-70b-specdec",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-11b-vision-preview",
-        "llama-3.2-90b-vision-preview",
-        "mixtral-8x7b-32768"
-    ]
+    # Use the same model list as the synchronous class
+    AVAILABLE_MODELS = GROQ.AVAILABLE_MODELS
 
     def __init__(
         self,
@@ -416,6 +528,10 @@ class AsyncGROQ(AsyncProvider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
         """
+        # Update available models from API
+        GROQ.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -518,7 +634,8 @@ class AsyncGROQ(AsyncProvider):
             ) as response:
                 if not response.is_success:
                     raise exceptions.FailedToGenerateResponseError(
-
+                        # Removed response.reason_phrase (not available in httpx response)
+                        f"Failed to generate response - ({response.status_code})"
                     )
 
                 message_load = ""
@@ -575,7 +692,8 @@ class AsyncGROQ(AsyncProvider):
             )
             if not response.is_success:
                 raise exceptions.FailedToGenerateResponseError(
-
+                    # Removed response.reason_phrase (not available in httpx response)
+                    f"Failed to generate response - ({response.status_code})"
                 )
             resp = response.json()
 
@@ -663,8 +781,21 @@ class AsyncGROQ(AsyncProvider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         try:
-
+            # Check delta first for streaming
+            if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
                 return response["choices"][0]["delta"]["content"]
-
-
-
+            # Check message content for non-streaming or final message
+            if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
+                return response["choices"][0]["message"]["content"]
+        except (KeyError, IndexError, TypeError):
+            # Handle cases where the structure might be different or content is null/missing
+            pass
+        return "" # Return empty string if no content found
+
+if __name__ == "__main__":
+    # Example usage
+    api_key = "gsk_*******************************"
+    groq = GROQ(api_key=api_key, model="compound-beta")
+    prompt = "What is the capital of France?"
+    response = groq.chat(prompt)
+    print(response)
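For orientation, the reworked provider keeps the same public surface (chat() and get_message(), with the streaming/non-streaming split behind for_stream()/for_non_stream()). Below is a minimal usage sketch based only on this diff: the API key is a placeholder, "compound-beta" comes from the diff's own __main__ example, and the streaming call assumes the method wrapping for_stream()/for_non_stream() is the provider's usual ask(prompt, stream=True), which per the new for_stream() yields {"text": ...} chunks.

```python
# Minimal usage sketch for the reworked Groq provider (not part of the package docs).
# Assumptions: "gsk_..." must be replaced with a real Groq API key, and the
# "compound-beta" model (taken from the diff's __main__ example) is available.
from webscout.Provider.Groq import GROQ

groq = GROQ(api_key="gsk_...", model="compound-beta")

# Non-streaming: chat() returns the final message text via get_message().
print(groq.chat("What is the capital of France?"))

# Streaming (assumed ask() signature): per the new for_stream(), each yielded
# item is a dict like {"text": "<new chunk>"} unless raw=True is passed.
for chunk in groq.ask("Summarize the Eiffel Tower in two sentences.", stream=True):
    print(chunk["text"], end="", flush=True)
print()
```

Note that constructing GROQ (or AsyncGROQ) now calls update_available_models(api_key) first, so the model check validates against the live model list when a valid key is supplied and falls back to the hard-coded defaults otherwise.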