webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/deepinfra.py

@@ -222,23 +222,60 @@ class Chat(BaseChat):
 
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
+        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
+
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
+
         "google/gemma-2-27b-it",
         "google/gemma-2-9b-it",
+        "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
+        "google/gemma-3-4b-it",
+        # "google/gemini-1.5-flash", # >>>> NOT WORKING
+        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+
+        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+
+        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-3.3-70B-Instruct",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
+
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
+        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
         "Qwen/QwQ-32B",
+        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen3-14B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
 
     def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
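The expanded DeepInfra model list can be exercised through the provider's OpenAI-compatible interface. A minimal sketch, assuming DeepInfra exposes the same chat.completions surface as the Groq class added later in this diff (the prompt text and timeout value are illustrative, not from the diff):

# Minimal sketch, assuming DeepInfra follows the OpenAICompatibleProvider
# pattern shown in this diff (chat.completions.create, no API key required).
from webscout.Provider.OPENAI.deepinfra import DeepInfra

client = DeepInfra(timeout=60, browser="chrome")

# "Qwen/Qwen3-32B" is one of the models added in this release.
response = client.chat.completions.create(
    model="Qwen/Qwen3-32B",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(response.choices[0].message.content)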
webscout/Provider/OPENAI/groq.py (new file)

@@ -0,0 +1,354 @@
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import curl_cffi for improved request handling
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- Groq Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Groq'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add frequency_penalty and presence_penalty if provided
+        if "frequency_penalty" in kwargs:
+            payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
+        if "presence_penalty" in kwargs:
+            payload["presence_penalty"] = kwargs.pop("presence_penalty")
+
+        # Add any tools if provided
+        if "tools" in kwargs and kwargs["tools"]:
+            payload["tools"] = kwargs.pop("tools")
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout,
+                impersonate="chrome110"  # Use impersonate for better compatibility
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        json_str = line[6:]
+                        if json_str == "[DONE]":
+                            break
+
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+
+                            # Update token counts if available
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+
+                            # Create the delta object
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+
+                            # Create the choice object
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+
+                            # Create the chunk object
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+
+                            # Convert to dict for proper formatting
+                            chunk_dict = chunk.to_dict()
+
+                            # Add usage information to match OpenAI format
+                            usage_dict = {
+                                "prompt_tokens": prompt_tokens or 10,
+                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
+                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                                "estimated_cost": None
+                            }
+
+                            # Update completion_tokens and total_tokens as we receive more content
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                                usage_dict["completion_tokens"] = completion_tokens
+                                usage_dict["total_tokens"] = total_tokens
+
+                            chunk_dict["usage"] = usage_dict
+
+                            yield chunk
+                        except json.JSONDecodeError:
+                            print(f"Warning: Could not decode JSON line: {json_str}")
+                            continue
+        except CurlError as e:
+            print(f"Error during Groq stream request: {e}")
+            raise IOError(f"Groq request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Groq stream: {e}")
+            raise
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                json=payload,
+                timeout=self._client.timeout,
+                impersonate="chrome110"  # Use impersonate for better compatibility
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
+
+            data = response.json()
+
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+
+                # Handle tool calls if present
+                tool_calls = message_d.get('tool_calls')
+
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', ''),
+                    tool_calls=tool_calls
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except CurlError as e:
+            print(f"Error during Groq non-stream request: {e}")
+            raise IOError(f"Groq request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Groq response: {e}")
+            raise
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Groq'):
+        self.completions = Completions(client)
+
+class Groq(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "distil-whisper-large-v3-en",
+        "gemma2-9b-it",
+        "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
+        "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
+        "deepseek-r1-distill-qwen-32b",
+        "deepseek-r1-distill-llama-70b",
+        "llama-3.3-70b-specdec",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "mixtral-8x7b-32768"
+    ]
+
+    def __init__(self, api_key: str = None, timeout: Optional[int] = 30, browser: str = "chrome"):
+        self.timeout = timeout
+        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
+        self.api_key = api_key
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+
+        # Set up headers with API key if provided
+        self.headers = {
+            "Content-Type": "application/json",
+        }
+
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+
+        # Try to use LitAgent for browser fingerprinting
+        try:
+            agent = LitAgent()
+            fingerprint = agent.generate_fingerprint(browser)
+
+            self.headers.update({
+                "Accept": fingerprint["accept"],
+                "Accept-Encoding": "gzip, deflate, br, zstd",
+                "Accept-Language": fingerprint["accept_language"],
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "Origin": "https://console.groq.com",
+                "Pragma": "no-cache",
+                "Referer": "https://console.groq.com/",
+                "Sec-Fetch-Dest": "empty",
+                "Sec-Fetch-Mode": "cors",
+                "Sec-Fetch-Site": "same-site",
+                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+                "Sec-CH-UA-Mobile": "?0",
+                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+                "User-Agent": fingerprint["user_agent"],
+            })
+        except (NameError, Exception):
+            # Fallback to basic headers if LitAgent is not available
+            self.headers.update({
+                "Accept": "application/json",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+            })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Groq API.
+
+        Args:
+            api_key (str, optional): Groq API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.groq.com/openai/v1/models",
+                headers=headers,
+                impersonate="chrome110"  # Use impersonate for fetching
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
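A usage sketch for the new Groq provider shown above. It assumes a valid key in a GROQ_API_KEY environment variable and attribute-style access on the utils dataclasses (ChatCompletion, Choice, ChoiceDelta); both are assumptions, not confirmed by this diff:

# Usage sketch for the OpenAI-compatible Groq provider added in this release.
# GROQ_API_KEY is an assumed environment variable holding a valid Groq key.
import os
from webscout.Provider.OPENAI.groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"], timeout=30)

# Non-streaming call, mirroring openai.chat.completions.create.
completion = client.chat.completions.create(
    model="llama-3.3-70b-versatile",
    messages=[{"role": "user", "content": "Explain SSE streaming in one line."}],
)
print(completion.choices[0].message.content)

# Streaming call: create() returns a generator of ChatCompletionChunk objects.
for chunk in client.chat.completions.create(
    model="llama-3.3-70b-versatile",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    # Assumes the chunk dataclass exposes choices[0].delta.content.
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)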
webscout/Provider/OPENAI/heckai.py

@@ -249,10 +249,14 @@ class HeckAI(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
+        "google/gemini-2.0-flash-001",
         "deepseek/deepseek-chat",
-        "openai/gpt-4o-mini",
         "deepseek/deepseek-r1",
-        "
+        "openai/gpt-4o-mini",
+        "openai/gpt-4.1-mini",
+        "x-ai/grok-3-mini-beta",
+        "meta-llama/llama-4-scout"
+
     ]
 
     def __init__(