webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/OPENAI/deepinfra.py
@@ -222,23 +222,60 @@ class Chat(BaseChat):
 
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
+        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
+
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
+
         "google/gemma-2-27b-it",
         "google/gemma-2-9b-it",
+        "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
+        "google/gemma-3-4b-it",
+        # "google/gemini-1.5-flash", # >>>> NOT WORKING
+        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+
+        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+
+        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-3.3-70B-Instruct",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
+
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
+        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
         "Qwen/QwQ-32B",
+        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen3-14B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
 
     def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
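
The OPENAI-compatible providers in this package mimic the openai client surface. A rough usage sketch for the expanded DeepInfra model list — the import path is taken from the files-changed list above, and the chat.completions.create call shape is assumed to match the Groq implementation added later in this diff:

    # Hypothetical usage sketch; import path per webscout/Provider/OPENAI/deepinfra.py above
    from webscout.Provider.OPENAI.deepinfra import DeepInfra

    client = DeepInfra(timeout=30, browser="chrome")  # __init__ signature shown in the hunk above
    response = client.chat.completions.create(
        model="Qwen/Qwen3-32B",  # one of the newly added Qwen3 entries
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)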
webscout/Provider/OPENAI/exachat.py
@@ -35,6 +35,8 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash-thinking-exp-01-21",
             "gemini-2.5-pro-exp-03-25",
             "gemini-2.0-pro-exp-02-05",
+            "gemini-2.5-flash-preview-04-17",
+
 
         ],
     },
webscout/Provider/OPENAI/exachat.py
@@ -83,6 +85,7 @@ MODEL_CONFIGS = {
     },
 }
 
+
 class Completions(BaseCompletions):
     def __init__(self, client: 'ExaChat'):
         self._client = client
webscout/Provider/OPENAI/exachat.py
@@ -292,6 +295,7 @@ class ExaChat(OpenAICompatibleProvider):
         "gemini-2.0-flash-thinking-exp-01-21",
         "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
+        "gemini-2.5-flash-preview-04-17",
 
         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
webscout/Provider/OPENAI/groq.py (new file)
@@ -0,0 +1,354 @@
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import curl_cffi for improved request handling
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- Groq Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Groq'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add frequency_penalty and presence_penalty if provided
+        if "frequency_penalty" in kwargs:
+            payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
+        if "presence_penalty" in kwargs:
+            payload["presence_penalty"] = kwargs.pop("presence_penalty")
+
+        # Add any tools if provided
+        if "tools" in kwargs and kwargs["tools"]:
+            payload["tools"] = kwargs.pop("tools")
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout,
+                impersonate="chrome110"  # Use impersonate for better compatibility
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        json_str = line[6:]
+                        if json_str == "[DONE]":
+                            break
+
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+
+                            # Update token counts if available
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+
+                            # Create the delta object
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+
+                            # Create the choice object
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+
+                            # Create the chunk object
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+
+                            # Convert to dict for proper formatting
+                            chunk_dict = chunk.to_dict()
+
+                            # Add usage information to match OpenAI format
+                            usage_dict = {
+                                "prompt_tokens": prompt_tokens or 10,
+                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
+                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                                "estimated_cost": None
+                            }
+
+                            # Update completion_tokens and total_tokens as we receive more content
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                                usage_dict["completion_tokens"] = completion_tokens
+                                usage_dict["total_tokens"] = total_tokens
+
+                            chunk_dict["usage"] = usage_dict
+
+                            yield chunk
+                        except json.JSONDecodeError:
+                            print(f"Warning: Could not decode JSON line: {json_str}")
+                            continue
+        except CurlError as e:
+            print(f"Error during Groq stream request: {e}")
+            raise IOError(f"Groq request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Groq stream: {e}")
+            raise
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                json=payload,
+                timeout=self._client.timeout,
+                impersonate="chrome110"  # Use impersonate for better compatibility
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")
+
+            data = response.json()
+
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+
+                # Handle tool calls if present
+                tool_calls = message_d.get('tool_calls')
+
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', ''),
+                    tool_calls=tool_calls
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except CurlError as e:
+            print(f"Error during Groq non-stream request: {e}")
+            raise IOError(f"Groq request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Groq response: {e}")
+            raise
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Groq'):
+        self.completions = Completions(client)
+
+class Groq(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "distil-whisper-large-v3-en",
+        "gemma2-9b-it",
+        "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
+        "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
+        "deepseek-r1-distill-qwen-32b",
+        "deepseek-r1-distill-llama-70b",
+        "llama-3.3-70b-specdec",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "mixtral-8x7b-32768"
+    ]
+
+    def __init__(self, api_key: str = None, timeout: Optional[int] = 30, browser: str = "chrome"):
+        self.timeout = timeout
+        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
+        self.api_key = api_key
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+
+        # Set up headers with API key if provided
+        self.headers = {
+            "Content-Type": "application/json",
+        }
+
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+
+        # Try to use LitAgent for browser fingerprinting
+        try:
+            agent = LitAgent()
+            fingerprint = agent.generate_fingerprint(browser)
+
+            self.headers.update({
+                "Accept": fingerprint["accept"],
+                "Accept-Encoding": "gzip, deflate, br, zstd",
+                "Accept-Language": fingerprint["accept_language"],
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "Origin": "https://console.groq.com",
+                "Pragma": "no-cache",
+                "Referer": "https://console.groq.com/",
+                "Sec-Fetch-Dest": "empty",
+                "Sec-Fetch-Mode": "cors",
+                "Sec-Fetch-Site": "same-site",
+                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+                "Sec-CH-UA-Mobile": "?0",
+                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+                "User-Agent": fingerprint["user_agent"],
+            })
+        except (NameError, Exception):
+            # Fallback to basic headers if LitAgent is not available
+            self.headers.update({
+                "Accept": "application/json",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+            })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Groq API.
+
+        Args:
+            api_key (str, optional): Groq API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.groq.com/openai/v1/models",
+                headers=headers,
+                impersonate="chrome110"  # Use impersonate for fetching
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
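
A minimal streaming sketch for the new Groq provider, based only on the code above. Attribute access on the chunk objects (choices[0].delta.content) is assumed from the constructors shown, and the model name is taken from AVAILABLE_MODELS without verifying it is still served by Groq:

    from webscout.Provider.OPENAI.groq import Groq  # module path per the files-changed list above

    client = Groq(api_key="YOUR_GROQ_API_KEY", timeout=30)
    # With stream=True, create() returns a generator of ChatCompletionChunk objects
    for chunk in client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")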
webscout/Provider/OPENAI/heckai.py
@@ -249,10 +249,14 @@ class HeckAI(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
+        "google/gemini-2.0-flash-001",
         "deepseek/deepseek-chat",
-        "openai/gpt-4o-mini",
         "deepseek/deepseek-r1",
-        "google/gemini-2.0-flash-001"
+        "openai/gpt-4o-mini",
+        "openai/gpt-4.1-mini",
+        "x-ai/grok-3-mini-beta",
+        "meta-llama/llama-4-scout"
+
     ]
 
     def __init__(