webscout 8.3.7__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/groq.py
CHANGED
|
@@ -1,364 +1,364 @@
|
|
|
1
|
-
import requests
|
|
2
|
-
import json
|
|
3
|
-
import time
|
|
4
|
-
import uuid
|
|
5
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
-
|
|
7
|
-
# Import curl_cffi for improved request handling
|
|
8
|
-
from curl_cffi.requests import Session
|
|
9
|
-
from curl_cffi import CurlError
|
|
10
|
-
|
|
11
|
-
# Import base classes and utility structures
|
|
12
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
13
|
-
from .utils import (
|
|
14
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
15
|
-
ChatCompletionMessage, CompletionUsage
|
|
16
|
-
)
|
|
17
|
-
|
|
18
|
-
# Attempt to import LitAgent, fallback if not available
|
|
19
|
-
try:
|
|
20
|
-
from webscout.litagent import LitAgent
|
|
21
|
-
except ImportError:
|
|
22
|
-
pass
|
|
23
|
-
|
|
24
|
-
# --- Groq Client ---
|
|
25
|
-
|
|
26
|
-
class Completions(BaseCompletions):
|
|
27
|
-
def __init__(self, client: 'Groq'):
|
|
28
|
-
self._client = client
|
|
29
|
-
|
|
30
|
-
def create(
|
|
31
|
-
self,
|
|
32
|
-
*,
|
|
33
|
-
model: str,
|
|
34
|
-
messages: List[Dict[str, str]],
|
|
35
|
-
max_tokens: Optional[int] = 2049,
|
|
36
|
-
stream: bool = False,
|
|
37
|
-
temperature: Optional[float] = None,
|
|
38
|
-
top_p: Optional[float] = None,
|
|
39
|
-
**kwargs: Any
|
|
40
|
-
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
41
|
-
"""
|
|
42
|
-
Creates a model response for the given chat conversation.
|
|
43
|
-
Mimics openai.chat.completions.create
|
|
44
|
-
"""
|
|
45
|
-
payload = {
|
|
46
|
-
"model": model,
|
|
47
|
-
"messages": messages,
|
|
48
|
-
"max_tokens": max_tokens,
|
|
49
|
-
"stream": stream,
|
|
50
|
-
}
|
|
51
|
-
if temperature is not None:
|
|
52
|
-
payload["temperature"] = temperature
|
|
53
|
-
if top_p is not None:
|
|
54
|
-
payload["top_p"] = top_p
|
|
55
|
-
|
|
56
|
-
# Add frequency_penalty and presence_penalty if provided
|
|
57
|
-
if "frequency_penalty" in kwargs:
|
|
58
|
-
payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
|
|
59
|
-
if "presence_penalty" in kwargs:
|
|
60
|
-
payload["presence_penalty"] = kwargs.pop("presence_penalty")
|
|
61
|
-
|
|
62
|
-
# Add any tools if provided
|
|
63
|
-
if "tools" in kwargs and kwargs["tools"]:
|
|
64
|
-
payload["tools"] = kwargs.pop("tools")
|
|
65
|
-
|
|
66
|
-
payload.update(kwargs)
|
|
67
|
-
|
|
68
|
-
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
69
|
-
created_time = int(time.time())
|
|
70
|
-
|
|
71
|
-
if stream:
|
|
72
|
-
return self._create_stream(request_id, created_time, model, payload)
|
|
73
|
-
else:
|
|
74
|
-
return self._create_non_stream(request_id, created_time, model, payload)
|
|
75
|
-
|
|
76
|
-
def _create_stream(
|
|
77
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
78
|
-
) -> Generator[ChatCompletionChunk, None, None]:
|
|
79
|
-
try:
|
|
80
|
-
response = self._client.session.post(
|
|
81
|
-
self._client.base_url,
|
|
82
|
-
json=payload,
|
|
83
|
-
stream=True,
|
|
84
|
-
timeout=self._client.timeout,
|
|
85
|
-
impersonate="chrome110" # Use impersonate for better compatibility
|
|
86
|
-
)
|
|
87
|
-
|
|
88
|
-
if response.status_code != 200:
|
|
89
|
-
raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
|
|
90
|
-
|
|
91
|
-
# Track token usage across chunks
|
|
92
|
-
prompt_tokens = 0
|
|
93
|
-
completion_tokens = 0
|
|
94
|
-
total_tokens = 0
|
|
95
|
-
|
|
96
|
-
for line in response.iter_lines(decode_unicode=True):
|
|
97
|
-
if line:
|
|
98
|
-
if line.startswith("data: "):
|
|
99
|
-
json_str = line[6:]
|
|
100
|
-
if json_str == "[DONE]":
|
|
101
|
-
break
|
|
102
|
-
|
|
103
|
-
try:
|
|
104
|
-
data = json.loads(json_str)
|
|
105
|
-
choice_data = data.get('choices', [{}])[0]
|
|
106
|
-
delta_data = choice_data.get('delta', {})
|
|
107
|
-
finish_reason = choice_data.get('finish_reason')
|
|
108
|
-
|
|
109
|
-
# Update token counts if available
|
|
110
|
-
usage_data = data.get('usage', {})
|
|
111
|
-
if usage_data:
|
|
112
|
-
prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
|
|
113
|
-
completion_tokens = usage_data.get('completion_tokens', completion_tokens)
|
|
114
|
-
total_tokens = usage_data.get('total_tokens', total_tokens)
|
|
115
|
-
|
|
116
|
-
# Create the delta object
|
|
117
|
-
delta = ChoiceDelta(
|
|
118
|
-
content=delta_data.get('content'),
|
|
119
|
-
role=delta_data.get('role'),
|
|
120
|
-
tool_calls=delta_data.get('tool_calls')
|
|
121
|
-
)
|
|
122
|
-
|
|
123
|
-
# Create the choice object
|
|
124
|
-
choice = Choice(
|
|
125
|
-
index=choice_data.get('index', 0),
|
|
126
|
-
delta=delta,
|
|
127
|
-
finish_reason=finish_reason,
|
|
128
|
-
logprobs=choice_data.get('logprobs')
|
|
129
|
-
)
|
|
130
|
-
|
|
131
|
-
# Create the chunk object
|
|
132
|
-
chunk = ChatCompletionChunk(
|
|
133
|
-
id=request_id,
|
|
134
|
-
choices=[choice],
|
|
135
|
-
created=created_time,
|
|
136
|
-
model=model,
|
|
137
|
-
system_fingerprint=data.get('system_fingerprint')
|
|
138
|
-
)
|
|
139
|
-
|
|
140
|
-
# Convert chunk to dict using Pydantic's API
|
|
141
|
-
if hasattr(chunk, "model_dump"):
|
|
142
|
-
chunk_dict = chunk.model_dump(exclude_none=True)
|
|
143
|
-
else:
|
|
144
|
-
chunk_dict = chunk.dict(exclude_none=True)
|
|
145
|
-
|
|
146
|
-
# Add usage information to match OpenAI format
|
|
147
|
-
usage_dict = {
|
|
148
|
-
"prompt_tokens": prompt_tokens or 10,
|
|
149
|
-
"completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
|
|
150
|
-
"total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
|
|
151
|
-
"estimated_cost": None
|
|
152
|
-
}
|
|
153
|
-
|
|
154
|
-
# Update completion_tokens and total_tokens as we receive more content
|
|
155
|
-
if delta_data.get('content'):
|
|
156
|
-
completion_tokens += 1
|
|
157
|
-
total_tokens = prompt_tokens + completion_tokens
|
|
158
|
-
usage_dict["completion_tokens"] = completion_tokens
|
|
159
|
-
usage_dict["total_tokens"] = total_tokens
|
|
160
|
-
|
|
161
|
-
chunk_dict["usage"] = usage_dict
|
|
162
|
-
|
|
163
|
-
yield chunk
|
|
164
|
-
except json.JSONDecodeError:
|
|
165
|
-
print(f"Warning: Could not decode JSON line: {json_str}")
|
|
166
|
-
continue
|
|
167
|
-
except CurlError as e:
|
|
168
|
-
print(f"Error during Groq stream request: {e}")
|
|
169
|
-
raise IOError(f"Groq request failed: {e}") from e
|
|
170
|
-
except Exception as e:
|
|
171
|
-
print(f"Error processing Groq stream: {e}")
|
|
172
|
-
raise
|
|
173
|
-
|
|
174
|
-
def _create_non_stream(
|
|
175
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
176
|
-
) -> ChatCompletion:
|
|
177
|
-
try:
|
|
178
|
-
response = self._client.session.post(
|
|
179
|
-
self._client.base_url,
|
|
180
|
-
json=payload,
|
|
181
|
-
timeout=self._client.timeout,
|
|
182
|
-
impersonate="chrome110" # Use impersonate for better compatibility
|
|
183
|
-
)
|
|
184
|
-
|
|
185
|
-
if response.status_code != 200:
|
|
186
|
-
raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
|
|
187
|
-
|
|
188
|
-
data = response.json()
|
|
189
|
-
|
|
190
|
-
choices_data = data.get('choices', [])
|
|
191
|
-
usage_data = data.get('usage', {})
|
|
192
|
-
|
|
193
|
-
choices = []
|
|
194
|
-
for choice_d in choices_data:
|
|
195
|
-
message_d = choice_d.get('message', {})
|
|
196
|
-
|
|
197
|
-
# Handle tool calls if present
|
|
198
|
-
tool_calls = message_d.get('tool_calls')
|
|
199
|
-
|
|
200
|
-
message = ChatCompletionMessage(
|
|
201
|
-
role=message_d.get('role', 'assistant'),
|
|
202
|
-
content=message_d.get('content', ''),
|
|
203
|
-
tool_calls=tool_calls
|
|
204
|
-
)
|
|
205
|
-
choice = Choice(
|
|
206
|
-
index=choice_d.get('index', 0),
|
|
207
|
-
message=message,
|
|
208
|
-
finish_reason=choice_d.get('finish_reason', 'stop')
|
|
209
|
-
)
|
|
210
|
-
choices.append(choice)
|
|
211
|
-
|
|
212
|
-
usage = CompletionUsage(
|
|
213
|
-
prompt_tokens=usage_data.get('prompt_tokens', 0),
|
|
214
|
-
completion_tokens=usage_data.get('completion_tokens', 0),
|
|
215
|
-
total_tokens=usage_data.get('total_tokens', 0)
|
|
216
|
-
)
|
|
217
|
-
|
|
218
|
-
completion = ChatCompletion(
|
|
219
|
-
id=request_id,
|
|
220
|
-
choices=choices,
|
|
221
|
-
created=created_time,
|
|
222
|
-
model=data.get('model', model),
|
|
223
|
-
usage=usage,
|
|
224
|
-
)
|
|
225
|
-
return completion
|
|
226
|
-
|
|
227
|
-
except CurlError as e:
|
|
228
|
-
print(f"Error during Groq non-stream request: {e}")
|
|
229
|
-
raise IOError(f"Groq request failed: {e}") from e
|
|
230
|
-
except Exception as e:
|
|
231
|
-
print(f"Error processing Groq response: {e}")
|
|
232
|
-
raise
|
|
233
|
-
|
|
234
|
-
class Chat(BaseChat):
|
|
235
|
-
def __init__(self, client: 'Groq'):
|
|
236
|
-
self.completions = Completions(client)
|
|
237
|
-
|
|
238
|
-
class Groq(OpenAICompatibleProvider):
|
|
239
|
-
AVAILABLE_MODELS = [
|
|
240
|
-
"distil-whisper-large-v3-en",
|
|
241
|
-
"gemma2-9b-it",
|
|
242
|
-
"llama-3.3-70b-versatile",
|
|
243
|
-
"llama-3.1-8b-instant",
|
|
244
|
-
"llama-guard-3-8b",
|
|
245
|
-
"llama3-70b-8192",
|
|
246
|
-
"llama3-8b-8192",
|
|
247
|
-
"whisper-large-v3",
|
|
248
|
-
"whisper-large-v3-turbo",
|
|
249
|
-
"meta-llama/llama-4-scout-17b-16e-instruct",
|
|
250
|
-
"meta-llama/llama-4-maverick-17b-128e-instruct",
|
|
251
|
-
"playai-tts",
|
|
252
|
-
"playai-tts-arabic",
|
|
253
|
-
"qwen-qwq-32b",
|
|
254
|
-
"mistral-saba-24b",
|
|
255
|
-
"qwen-2.5-coder-32b",
|
|
256
|
-
"qwen-2.5-32b",
|
|
257
|
-
"deepseek-r1-distill-qwen-32b",
|
|
258
|
-
"deepseek-r1-distill-llama-70b",
|
|
259
|
-
"llama-3.3-70b-specdec",
|
|
260
|
-
"llama-3.2-1b-preview",
|
|
261
|
-
"llama-3.2-3b-preview",
|
|
262
|
-
"llama-3.2-11b-vision-preview",
|
|
263
|
-
"llama-3.2-90b-vision-preview",
|
|
264
|
-
"mixtral-8x7b-32768"
|
|
265
|
-
]
|
|
266
|
-
|
|
267
|
-
def __init__(self, api_key: str = None, timeout: Optional[int] = 30, browser: str = "chrome"):
|
|
268
|
-
self.timeout = timeout
|
|
269
|
-
self.base_url = "https://api.groq.com/openai/v1/chat/completions"
|
|
270
|
-
self.api_key = api_key
|
|
271
|
-
|
|
272
|
-
# Initialize curl_cffi Session
|
|
273
|
-
self.session = Session()
|
|
274
|
-
|
|
275
|
-
# Set up headers with API key if provided
|
|
276
|
-
self.headers = {
|
|
277
|
-
"Content-Type": "application/json",
|
|
278
|
-
}
|
|
279
|
-
|
|
280
|
-
if api_key:
|
|
281
|
-
self.headers["Authorization"] = f"Bearer {api_key}"
|
|
282
|
-
|
|
283
|
-
# Try to use LitAgent for browser fingerprinting
|
|
284
|
-
try:
|
|
285
|
-
agent = LitAgent()
|
|
286
|
-
fingerprint = agent.generate_fingerprint(browser)
|
|
287
|
-
|
|
288
|
-
self.headers.update({
|
|
289
|
-
"Accept": fingerprint["accept"],
|
|
290
|
-
"Accept-Encoding": "gzip, deflate, br, zstd",
|
|
291
|
-
"Accept-Language": fingerprint["accept_language"],
|
|
292
|
-
"Cache-Control": "no-cache",
|
|
293
|
-
"Connection": "keep-alive",
|
|
294
|
-
"Origin": "https://console.groq.com",
|
|
295
|
-
"Pragma": "no-cache",
|
|
296
|
-
"Referer": "https://console.groq.com/",
|
|
297
|
-
"Sec-Fetch-Dest": "empty",
|
|
298
|
-
"Sec-Fetch-Mode": "cors",
|
|
299
|
-
"Sec-Fetch-Site": "same-site",
|
|
300
|
-
"Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
|
|
301
|
-
"Sec-CH-UA-Mobile": "?0",
|
|
302
|
-
"Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
|
|
303
|
-
"User-Agent": fingerprint["user_agent"],
|
|
304
|
-
})
|
|
305
|
-
except (NameError, Exception):
|
|
306
|
-
# Fallback to basic headers if LitAgent is not available
|
|
307
|
-
self.headers.update({
|
|
308
|
-
"Accept": "application/json",
|
|
309
|
-
"Accept-Encoding": "gzip, deflate, br",
|
|
310
|
-
"Accept-Language": "en-US,en;q=0.9",
|
|
311
|
-
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
|
|
312
|
-
})
|
|
313
|
-
|
|
314
|
-
# Update session headers
|
|
315
|
-
self.session.headers.update(self.headers)
|
|
316
|
-
|
|
317
|
-
# Initialize chat interface
|
|
318
|
-
self.chat = Chat(self)
|
|
319
|
-
|
|
320
|
-
@classmethod
|
|
321
|
-
def get_models(cls, api_key: str = None):
|
|
322
|
-
"""Fetch available models from Groq API.
|
|
323
|
-
|
|
324
|
-
Args:
|
|
325
|
-
api_key (str, optional): Groq API key. If not provided, returns default models.
|
|
326
|
-
|
|
327
|
-
Returns:
|
|
328
|
-
list: List of available model IDs
|
|
329
|
-
"""
|
|
330
|
-
if not api_key:
|
|
331
|
-
return cls.AVAILABLE_MODELS
|
|
332
|
-
|
|
333
|
-
try:
|
|
334
|
-
# Use a temporary curl_cffi session for this class method
|
|
335
|
-
temp_session = Session()
|
|
336
|
-
headers = {
|
|
337
|
-
"Content-Type": "application/json",
|
|
338
|
-
"Authorization": f"Bearer {api_key}",
|
|
339
|
-
}
|
|
340
|
-
|
|
341
|
-
response = temp_session.get(
|
|
342
|
-
"https://api.groq.com/openai/v1/models",
|
|
343
|
-
headers=headers,
|
|
344
|
-
impersonate="chrome110" # Use impersonate for fetching
|
|
345
|
-
)
|
|
346
|
-
|
|
347
|
-
if response.status_code != 200:
|
|
348
|
-
return cls.AVAILABLE_MODELS
|
|
349
|
-
|
|
350
|
-
data = response.json()
|
|
351
|
-
if "data" in data and isinstance(data["data"], list):
|
|
352
|
-
return [model["id"] for model in data["data"]]
|
|
353
|
-
return cls.AVAILABLE_MODELS
|
|
354
|
-
|
|
355
|
-
except (CurlError, Exception):
|
|
356
|
-
# Fallback to default models list if fetching fails
|
|
357
|
-
return cls.AVAILABLE_MODELS
|
|
358
|
-
|
|
359
|
-
@property
|
|
360
|
-
def models(self):
|
|
361
|
-
class _ModelList:
|
|
362
|
-
def list(inner_self):
|
|
363
|
-
return type(self).AVAILABLE_MODELS
|
|
364
|
-
return _ModelList()
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
+
|
|
7
|
+
# Import curl_cffi for improved request handling
|
|
8
|
+
from curl_cffi.requests import Session
|
|
9
|
+
from curl_cffi import CurlError
|
|
10
|
+
|
|
11
|
+
# Import base classes and utility structures
|
|
12
|
+
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
13
|
+
from .utils import (
|
|
14
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
15
|
+
ChatCompletionMessage, CompletionUsage
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
# Attempt to import LitAgent, fallback if not available
|
|
19
|
+
try:
|
|
20
|
+
from webscout.litagent import LitAgent
|
|
21
|
+
except ImportError:
|
|
22
|
+
pass
|
|
23
|
+
|
|
24
|
+
# --- Groq Client ---
|
|
25
|
+
|
|
26
|
+
class Completions(BaseCompletions):
|
|
27
|
+
def __init__(self, client: 'Groq'):
|
|
28
|
+
self._client = client
|
|
29
|
+
|
|
30
|
+
def create(
|
|
31
|
+
self,
|
|
32
|
+
*,
|
|
33
|
+
model: str,
|
|
34
|
+
messages: List[Dict[str, str]],
|
|
35
|
+
max_tokens: Optional[int] = 2049,
|
|
36
|
+
stream: bool = False,
|
|
37
|
+
temperature: Optional[float] = None,
|
|
38
|
+
top_p: Optional[float] = None,
|
|
39
|
+
**kwargs: Any
|
|
40
|
+
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
41
|
+
"""
|
|
42
|
+
Creates a model response for the given chat conversation.
|
|
43
|
+
Mimics openai.chat.completions.create
|
|
44
|
+
"""
|
|
45
|
+
payload = {
|
|
46
|
+
"model": model,
|
|
47
|
+
"messages": messages,
|
|
48
|
+
"max_tokens": max_tokens,
|
|
49
|
+
"stream": stream,
|
|
50
|
+
}
|
|
51
|
+
if temperature is not None:
|
|
52
|
+
payload["temperature"] = temperature
|
|
53
|
+
if top_p is not None:
|
|
54
|
+
payload["top_p"] = top_p
|
|
55
|
+
|
|
56
|
+
# Add frequency_penalty and presence_penalty if provided
|
|
57
|
+
if "frequency_penalty" in kwargs:
|
|
58
|
+
payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
|
|
59
|
+
if "presence_penalty" in kwargs:
|
|
60
|
+
payload["presence_penalty"] = kwargs.pop("presence_penalty")
|
|
61
|
+
|
|
62
|
+
# Add any tools if provided
|
|
63
|
+
if "tools" in kwargs and kwargs["tools"]:
|
|
64
|
+
payload["tools"] = kwargs.pop("tools")
|
|
65
|
+
|
|
66
|
+
payload.update(kwargs)
|
|
67
|
+
|
|
68
|
+
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
69
|
+
created_time = int(time.time())
|
|
70
|
+
|
|
71
|
+
if stream:
|
|
72
|
+
return self._create_stream(request_id, created_time, model, payload)
|
|
73
|
+
else:
|
|
74
|
+
return self._create_non_stream(request_id, created_time, model, payload)
|
|
75
|
+
|
|
76
|
+
def _create_stream(
|
|
77
|
+
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
78
|
+
) -> Generator[ChatCompletionChunk, None, None]:
|
|
79
|
+
try:
|
|
80
|
+
response = self._client.session.post(
|
|
81
|
+
self._client.base_url,
|
|
82
|
+
json=payload,
|
|
83
|
+
stream=True,
|
|
84
|
+
timeout=self._client.timeout,
|
|
85
|
+
impersonate="chrome110" # Use impersonate for better compatibility
|
|
86
|
+
)
|
|
87
|
+
|
|
88
|
+
if response.status_code != 200:
|
|
89
|
+
raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
|
|
90
|
+
|
|
91
|
+
# Track token usage across chunks
|
|
92
|
+
prompt_tokens = 0
|
|
93
|
+
completion_tokens = 0
|
|
94
|
+
total_tokens = 0
|
|
95
|
+
|
|
96
|
+
for line in response.iter_lines(decode_unicode=True):
|
|
97
|
+
if line:
|
|
98
|
+
if line.startswith("data: "):
|
|
99
|
+
json_str = line[6:]
|
|
100
|
+
if json_str == "[DONE]":
|
|
101
|
+
break
|
|
102
|
+
|
|
103
|
+
try:
|
|
104
|
+
data = json.loads(json_str)
|
|
105
|
+
choice_data = data.get('choices', [{}])[0]
|
|
106
|
+
delta_data = choice_data.get('delta', {})
|
|
107
|
+
finish_reason = choice_data.get('finish_reason')
|
|
108
|
+
|
|
109
|
+
# Update token counts if available
|
|
110
|
+
usage_data = data.get('usage', {})
|
|
111
|
+
if usage_data:
|
|
112
|
+
prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
|
|
113
|
+
completion_tokens = usage_data.get('completion_tokens', completion_tokens)
|
|
114
|
+
total_tokens = usage_data.get('total_tokens', total_tokens)
|
|
115
|
+
|
|
116
|
+
# Create the delta object
|
|
117
|
+
delta = ChoiceDelta(
|
|
118
|
+
content=delta_data.get('content'),
|
|
119
|
+
role=delta_data.get('role'),
|
|
120
|
+
tool_calls=delta_data.get('tool_calls')
|
|
121
|
+
)
|
|
122
|
+
|
|
123
|
+
# Create the choice object
|
|
124
|
+
choice = Choice(
|
|
125
|
+
index=choice_data.get('index', 0),
|
|
126
|
+
delta=delta,
|
|
127
|
+
finish_reason=finish_reason,
|
|
128
|
+
logprobs=choice_data.get('logprobs')
|
|
129
|
+
)
|
|
130
|
+
|
|
131
|
+
# Create the chunk object
|
|
132
|
+
chunk = ChatCompletionChunk(
|
|
133
|
+
id=request_id,
|
|
134
|
+
choices=[choice],
|
|
135
|
+
created=created_time,
|
|
136
|
+
model=model,
|
|
137
|
+
system_fingerprint=data.get('system_fingerprint')
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
# Convert chunk to dict using Pydantic's API
|
|
141
|
+
if hasattr(chunk, "model_dump"):
|
|
142
|
+
chunk_dict = chunk.model_dump(exclude_none=True)
|
|
143
|
+
else:
|
|
144
|
+
chunk_dict = chunk.dict(exclude_none=True)
|
|
145
|
+
|
|
146
|
+
# Add usage information to match OpenAI format
|
|
147
|
+
usage_dict = {
|
|
148
|
+
"prompt_tokens": prompt_tokens or 10,
|
|
149
|
+
"completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
|
|
150
|
+
"total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
|
|
151
|
+
"estimated_cost": None
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
# Update completion_tokens and total_tokens as we receive more content
|
|
155
|
+
if delta_data.get('content'):
|
|
156
|
+
completion_tokens += 1
|
|
157
|
+
total_tokens = prompt_tokens + completion_tokens
|
|
158
|
+
usage_dict["completion_tokens"] = completion_tokens
|
|
159
|
+
usage_dict["total_tokens"] = total_tokens
|
|
160
|
+
|
|
161
|
+
chunk_dict["usage"] = usage_dict
|
|
162
|
+
|
|
163
|
+
yield chunk
|
|
164
|
+
except json.JSONDecodeError:
|
|
165
|
+
print(f"Warning: Could not decode JSON line: {json_str}")
|
|
166
|
+
continue
|
|
167
|
+
except CurlError as e:
|
|
168
|
+
print(f"Error during Groq stream request: {e}")
|
|
169
|
+
raise IOError(f"Groq request failed: {e}") from e
|
|
170
|
+
except Exception as e:
|
|
171
|
+
print(f"Error processing Groq stream: {e}")
|
|
172
|
+
raise
|
|
173
|
+
|
|
174
|
+
def _create_non_stream(
    self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
) -> ChatCompletion:
    """Send a blocking (non-streaming) chat completion request to Groq.

    Args:
        request_id: Identifier stamped onto the returned completion.
        created_time: Unix timestamp stamped onto the returned completion.
        model: Model name used as a fallback when the response omits one.
        payload: JSON body POSTed to the chat completions endpoint.

    Returns:
        ChatCompletion: Parsed completion with typed choices and token usage.

    Raises:
        IOError: On transport failure or a non-200 HTTP status.
    """
    try:
        resp = self._client.session.post(
            self._client.base_url,
            json=payload,
            timeout=self._client.timeout,
            impersonate="chrome110"  # Use impersonate for better compatibility
        )

        if resp.status_code != 200:
            raise IOError(f"Groq request failed with status code {resp.status_code}: {resp.text}")

        body = resp.json()

        def _build_choice(raw_choice: Dict[str, Any]) -> Choice:
            # Map one raw choice dict onto the typed message/choice objects.
            raw_message = raw_choice.get('message', {})
            return Choice(
                index=raw_choice.get('index', 0),
                message=ChatCompletionMessage(
                    role=raw_message.get('role', 'assistant'),
                    content=raw_message.get('content', ''),
                    tool_calls=raw_message.get('tool_calls')
                ),
                finish_reason=raw_choice.get('finish_reason', 'stop')
            )

        token_counts = body.get('usage', {})

        return ChatCompletion(
            id=request_id,
            choices=[_build_choice(c) for c in body.get('choices', [])],
            created=created_time,
            model=body.get('model', model),
            usage=CompletionUsage(
                prompt_tokens=token_counts.get('prompt_tokens', 0),
                completion_tokens=token_counts.get('completion_tokens', 0),
                total_tokens=token_counts.get('total_tokens', 0)
            ),
        )

    except CurlError as e:
        print(f"Error during Groq non-stream request: {e}")
        raise IOError(f"Groq request failed: {e}") from e
    except Exception as e:
        print(f"Error processing Groq response: {e}")
        raise
class Chat(BaseChat):
    """Namespace object mirroring the OpenAI client layout (``client.chat.completions``)."""

    def __init__(self, client: 'Groq'):
        # Expose completion creation through the standard attribute path.
        self.completions = Completions(client)
class Groq(OpenAICompatibleProvider):
    """OpenAI-compatible client for the Groq chat completions API.

    Presents the familiar ``client.chat.completions.create(...)`` surface while
    talking to ``https://api.groq.com/openai/v1`` through a curl_cffi ``Session``
    that impersonates a real browser (headers plus TLS fingerprint).
    """

    # Static fallback list, used when no API key is supplied or the live
    # /models endpoint cannot be queried.
    AVAILABLE_MODELS = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-guard-3-8b",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "playai-tts",
        "playai-tts-arabic",
        "qwen-qwq-32b",
        "mistral-saba-24b",
        "qwen-2.5-coder-32b",
        "qwen-2.5-32b",
        "deepseek-r1-distill-qwen-32b",
        "deepseek-r1-distill-llama-70b",
        "llama-3.3-70b-specdec",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "mixtral-8x7b-32768",
    ]

    def __init__(self, api_key: Optional[str] = None, timeout: Optional[int] = 30, browser: str = "chrome"):
        """Initialize the Groq client.

        Args:
            api_key: Groq API key. When omitted, requests carry no
                ``Authorization`` header (authenticated endpoints will reject them).
            timeout: Per-request timeout in seconds.
            browser: Browser name handed to LitAgent for fingerprint generation.
        """
        self.timeout = timeout
        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
        self.api_key = api_key

        # curl_cffi session provides TLS-level browser impersonation.
        self.session = Session()

        self.headers = {
            "Content-Type": "application/json",
        }
        if api_key:
            self.headers["Authorization"] = f"Bearer {api_key}"

        # Prefer a realistic browser fingerprint; fall back to static headers
        # when LitAgent is unavailable or fingerprint generation fails.
        # (The original `except (NameError, Exception)` was redundant —
        # Exception already subsumes NameError.)
        try:
            fingerprint = LitAgent().generate_fingerprint(browser)

            self.headers.update({
                "Accept": fingerprint["accept"],
                "Accept-Encoding": "gzip, deflate, br, zstd",
                "Accept-Language": fingerprint["accept_language"],
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Origin": "https://console.groq.com",
                "Pragma": "no-cache",
                "Referer": "https://console.groq.com/",
                "Sec-Fetch-Dest": "empty",
                "Sec-Fetch-Mode": "cors",
                "Sec-Fetch-Site": "same-site",
                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
                "Sec-CH-UA-Mobile": "?0",
                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
                "User-Agent": fingerprint["user_agent"],
            })
        except Exception:
            # Fallback to basic headers if LitAgent is not available.
            self.headers.update({
                "Accept": "application/json",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-US,en;q=0.9",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            })

        # Push the assembled headers onto the session for every request.
        self.session.headers.update(self.headers)

        # Public chat interface: client.chat.completions.create(...).
        self.chat = Chat(self)

    @classmethod
    def get_models(cls, api_key: Optional[str] = None):
        """Fetch available models from the Groq API.

        Args:
            api_key: Groq API key. If not provided, returns the static
                ``AVAILABLE_MODELS`` list.

        Returns:
            list: Model IDs reported by the API, or ``AVAILABLE_MODELS`` on any failure.
        """
        if not api_key:
            return cls.AVAILABLE_MODELS

        try:
            # Use a temporary curl_cffi session for this class method.
            temp_session = Session()
            response = temp_session.get(
                "https://api.groq.com/openai/v1/models",
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {api_key}",
                },
                impersonate="chrome110"  # Match the impersonation used elsewhere
            )

            if response.status_code != 200:
                return cls.AVAILABLE_MODELS

            data = response.json()
            if "data" in data and isinstance(data["data"], list):
                return [model["id"] for model in data["data"]]
            return cls.AVAILABLE_MODELS

        except Exception:
            # Network or parse failure (CurlError included — it subclasses
            # Exception, so the old `(CurlError, Exception)` tuple was redundant):
            # fall back to the static list.
            return cls.AVAILABLE_MODELS

    @property
    def models(self):
        """OpenAI-style ``client.models.list()`` shim over ``AVAILABLE_MODELS``."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()