webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -1,735 +1,766 @@
1
- import requests
2
- import random
3
- import string
4
- import base64
5
- from datetime import datetime, timedelta
6
- from typing import List, Dict, Optional, Any
7
- import json
8
- import uuid
9
- import time
10
-
11
- # Import base classes and utility structures
12
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
- from .utils import (
14
- ChatCompletion, Choice,
15
- ChatCompletionMessage, CompletionUsage
16
- )
17
-
18
-
19
def to_data_uri(image_data):
    """Convert image data to a ``data:`` URI string.

    Accepts raw image bytes, a string (assumed to already be a data URI
    and returned unchanged), or a ``(data, name)`` tuple/list as produced
    by the media-collection code in :class:`Completions` — the name part
    is ignored and only the payload is encoded.

    Args:
        image_data: ``bytes``, ``str``, or a ``(data, name)`` pair.

    Returns:
        A ``data:<mime>;base64,<payload>`` string (or the input string
        unchanged when a ``str`` is given).
    """
    # Generalization/bugfix: callers build media as (data, name) tuples;
    # unwrap so the payload (str or bytes) is what gets encoded below.
    if isinstance(image_data, (tuple, list)) and image_data:
        image_data = image_data[0]

    if isinstance(image_data, str):
        # Assume it's already a data URI
        return image_data

    # Encode binary data to base64
    encoded = base64.b64encode(image_data).decode('utf-8')

    # Determine MIME type by sniffing well-known magic bytes (simplified).
    mime_type = "image/jpeg"  # Default
    if image_data.startswith(b'\x89PNG'):
        mime_type = "image/png"
    elif image_data.startswith(b'\xff\xd8'):
        mime_type = "image/jpeg"
    elif image_data.startswith(b'GIF'):
        mime_type = "image/gif"

    return f"data:{mime_type};base64,{encoded}"
38
-
39
-
40
class Completions(BaseCompletions):
    """Chat-completions endpoint for the BLACKBOXAI client.

    Only non-streaming requests are supported: ``create(..., stream=True)``
    raises ``ValueError``. Image attachments (``image_url`` content parts)
    are collected from user messages and sent as data URIs.
    """

    def __init__(self, client: 'BLACKBOXAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, Any]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> ChatCompletion:
        """
        Create a chat completion with BlackboxAI API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate
            stream: If True, raises an error as streaming is not supported
            temperature: Sampling temperature (0-1)
            top_p: Nucleus sampling parameter (0-1)
            **kwargs: Additional parameters to pass to the API

        Returns:
            Returns a ChatCompletion object

        Raises:
            ValueError: If ``stream=True`` is requested.
            IOError: If the HTTP request fails or returns a non-200 status.
        """
        # Streaming is not implemented for this provider; fail fast.
        if stream:
            raise ValueError("Streaming is not supported by the BLACKBOXAI provider. Please use stream=False.")

        # Generate request ID and timestamp
        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        # Extract the first system message, if present; otherwise use a default.
        system_message = "You are a helpful AI assistant."
        for msg in messages:
            if msg.get("role") == "system":
                system_message = msg.get("content")
                break

        # Collect any image attachments from user messages.
        media = self._collect_media(messages)

        # Use non-streaming implementation
        return self._create_non_streaming(
            request_id=request_id,
            created_time=created_time,
            model=model,
            messages=messages,
            system_message=system_message,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            media=media
        )

    @staticmethod
    def _collect_media(messages: List[Dict[str, Any]]) -> List:
        """Collect image attachments from user messages.

        Returns a list of ``(payload, filename)`` pairs where ``payload`` is
        either a data-URI string (kept as-is) or raw bytes fetched from a
        remote URL. Fetch failures are silently skipped (best effort).
        """
        media: List = []
        for msg in messages:
            if msg.get("role") != "user":
                continue
            content = msg.get("content", [])
            if not isinstance(content, list):
                continue
            for item in content:
                if not (isinstance(item, dict) and item.get("type") == "image_url"):
                    continue
                image_url = item.get("image_url", {})
                if not (isinstance(image_url, dict) and "url" in image_url):
                    continue
                url = image_url["url"]
                if url.startswith("data:"):
                    # Already a data URI — store the string directly.
                    media.append((url, f"image_{len(media)}.png"))
                else:
                    # Remote URL: fetch and store raw bytes; deliberately
                    # best-effort, unreachable images are dropped.
                    try:
                        image_response = requests.get(url)
                        if image_response.ok:
                            media.append((image_response.content, f"image_{len(media)}.png"))
                    except Exception:
                        pass
        return media

    def _create_non_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        messages: List[Dict[str, Any]],
        system_message: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        media: Optional[List] = None
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions.

        Builds the BlackboxAI wire payload, performs the HTTP request, and
        converts the plain-text / SSE-ish response into a ChatCompletion.

        Raises:
            IOError: On any request/response failure.
        """
        try:
            # Convert OpenAI-style messages to the BlackboxAI wire format.
            # The first message reuses request_id so the chat id and first
            # message id match, as the upstream API expects.
            blackbox_messages = []
            for i, msg in enumerate(messages):
                if msg["role"] == "system":
                    continue  # System message handled separately
                msg_id = self._client.generate_id() if i > 0 else request_id
                blackbox_messages.append({
                    "id": msg_id,
                    "content": msg["content"],
                    "role": msg["role"]
                })

            # Attach image data to the last message, if any.
            if media and blackbox_messages:
                blackbox_messages[-1]['data'] = {
                    "imagesData": [
                        {
                            "filePath": "/",
                            # BUGFIX: media holds (payload, name) tuples; pass
                            # only the payload to to_data_uri, not the tuple.
                            "contents": to_data_uri(image_data)
                        } for image_data, _image_name in media
                    ],
                    "fileText": "",
                    "title": ""
                }

            # Generate a throwaway session bound to a random @blackbox.ai email.
            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
            session_data = self._client.generate_session(request_email)

            # Create the API request payload
            payload = self._client.create_request_payload(
                messages=blackbox_messages,
                chat_id=request_id,
                system_message=system_message,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                session_data=session_data,
                model=model
            )

            # Make the API request with cookies
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                headers=self._client.headers,
                cookies=self._client.cookies,
                timeout=self._client.timeout
            )

            if response.status_code != 200:
                raise IOError(f"BlackboxAI request failed with status code {response.status_code}")

            # The API may answer either as plain text or as SSE "data: " lines.
            response_text = response.text
            if "data: " in response_text:
                # Extract content from SSE format, dropping known error banners.
                content_lines = []
                for line in response_text.split('\n'):
                    if line.startswith("data: "):
                        line = line[6:].strip()
                        if line and not any(error_msg in line.lower() for error_msg in [
                            "service has been suspended",
                            "api request failed",
                            "you have reached your request limit"
                        ]):
                            content_lines.append(line)
                full_content = "".join(content_lines)
            else:
                # Regular response
                full_content = response_text

            message = ChatCompletionMessage(
                role="assistant",
                content=full_content
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Rough token estimate: ~4 characters per token.
            prompt_tokens = sum(len(str(msg.get("content", ""))) // 4 for msg in messages)
            completion_tokens = len(full_content) // 4

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=CompletionUsage(
                    prompt_tokens=prompt_tokens,
                    completion_tokens=completion_tokens,
                    total_tokens=prompt_tokens + completion_tokens
                )
            )

        except Exception as e:
            raise IOError(f"BlackboxAI request failed: {str(e)}") from e
253
-
254
-
255
class Chat(BaseChat):
    """Namespace object mirroring the OpenAI client layout.

    Exposes the completions API as ``client.chat.completions`` so callers
    written against the official OpenAI SDK shape keep working.
    """

    def __init__(self, client: 'BLACKBOXAI'):
        # Bind the completions endpoint to the owning client.
        self.completions = Completions(client)
258
-
259
-
260
- class BLACKBOXAI(OpenAICompatibleProvider):
261
- """
262
- OpenAI-compatible client for BlackboxAI API.
263
-
264
- Usage:
265
- client = BLACKBOXAI()
266
- response = client.chat.completions.create(
267
- model="GPT-4.1",
268
- messages=[{"role": "user", "content": "Hello!"}]
269
- )
270
- print(response.choices[0].message.content)
271
- """
272
- # Default model
273
- default_model = "GPT-4.1"
274
- default_vision_model = default_model
275
- api_endpoint = "https://www.blackbox.ai/api/chat"
276
- timeout = 30
277
-
278
-
279
- # Default model (remains the same as per original class)
280
- default_model = "GPT-4.1"
281
- default_vision_model = default_model
282
-
283
- # New OpenRouter models list
284
- openrouter_models = [
285
- "Deepcoder 14B Preview",
286
- "DeepHermes 3 Llama 3 8B Preview",
287
- "DeepSeek R1 Zero",
288
- "Dolphin3.0 Mistral 24B",
289
- "Dolphin3.0 R1 Mistral 24B",
290
- "Flash 3",
291
- "Gemini 2.0 Flash Experimental",
292
- "Gemma 2 9B",
293
- "Gemma 3 12B",
294
- "Gemma 3 1B",
295
- "Gemma 3 27B",
296
- "Gemma 3 4B",
297
- "Kimi VL A3B Thinking",
298
- "Llama 3.1 8B Instruct",
299
- "Llama 3.1 Nemotron Ultra 253B v1",
300
- "Llama 3.2 11B Vision Instruct",
301
- "Llama 3.2 1B Instruct",
302
- "Llama 3.2 3B Instruct",
303
- "Llama 3.3 70B Instruct",
304
- "Llama 3.3 Nemotron Super 49B v1",
305
- "Llama 4 Maverick",
306
- "Llama 4 Scout",
307
- "Mistral 7B Instruct",
308
- "Mistral Nemo",
309
- "Mistral Small 3",
310
- "Mistral Small 3.1 24B",
311
- "Molmo 7B D",
312
- "Moonlight 16B A3B Instruct",
313
- "Qwen2.5 72B Instruct",
314
- "Qwen2.5 7B Instruct",
315
- "Qwen2.5 Coder 32B Instruct",
316
- "Qwen2.5 VL 32B Instruct",
317
- "Qwen2.5 VL 3B Instruct",
318
- "Qwen2.5 VL 72B Instruct",
319
- "Qwen2.5-VL 7B Instruct",
320
- "Qwerky 72B",
321
- "QwQ 32B",
322
- "QwQ 32B Preview",
323
- "QwQ 32B RpR v1",
324
- "R1",
325
- "R1 Distill Llama 70B",
326
- "R1 Distill Qwen 14B",
327
- "R1 Distill Qwen 32B",
328
- ]
329
-
330
- # New base models list
331
- models = [
332
- default_model,
333
- "o3-mini",
334
- "gpt-4.1-nano",
335
- "Claude-sonnet-3.7",
336
- "Claude-sonnet-3.5",
337
- "DeepSeek-R1",
338
- "Mistral-Small-24B-Instruct-2501",
339
- *openrouter_models,
340
- # Trending agent modes (names)
341
- 'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
342
- 'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
343
- 'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
344
- 'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
345
- 'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
346
- 'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
347
- ]
348
-
349
- # Models that support vision capabilities
350
- vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct"] # Added Llama vision
351
-
352
- # Models that can be directly selected by users
353
- userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
354
-
355
- # Agent mode configurations
356
- agentMode = {
357
- # OpenRouter Free
358
- 'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
359
- 'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
360
- 'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
361
- 'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
362
- 'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
363
- 'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
364
- 'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
365
- 'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
366
- 'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
367
- 'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
368
- 'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
369
- 'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
370
- 'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
371
- 'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
372
- 'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
373
- 'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
374
- 'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
375
- 'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
376
- 'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
377
- 'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
378
- 'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
379
- 'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
380
- 'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
381
- 'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
382
- 'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"}, # Matches Mistral-Small-24B-Instruct-2501
383
- 'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
384
- 'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
385
- 'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
386
- 'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
387
- 'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
388
- 'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
389
- 'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
390
- 'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
391
- 'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
392
- 'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
393
- 'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
394
- 'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
395
- 'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
396
- 'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
397
- 'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"}, # Matches DeepSeek-R1
398
- 'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
399
- 'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
400
- 'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
401
- # Default models from the new list
402
- 'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
403
- 'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
404
- 'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}, # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
405
- 'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
406
- # Add default_model if it's not covered and has an agent mode
407
- default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model}, # Assuming GPT-4.1 is agent-compatible
408
- 'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"}, # Assuming o3-mini is agent-compatible
409
- 'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"}, # Assuming gpt-4.1-nano is agent-compatible
410
- }
411
-
412
- # Trending agent modes
413
- trendingAgentMode = {
414
- 'Python Agent': {'mode': True, 'id': "python"},
415
- 'HTML Agent': {'mode': True, 'id': "html"},
416
- 'Builder Agent': {'mode': True, 'id': "builder"},
417
- 'Java Agent': {'mode': True, 'id': "java"},
418
- 'JavaScript Agent': {'mode': True, 'id': "javascript"},
419
- 'React Agent': {'mode': True, 'id': "react"},
420
- 'Android Agent': {'mode': True, 'id': "android"},
421
- 'Flutter Agent': {'mode': True, 'id': "flutter"},
422
- 'Next.js Agent': {'mode': True, 'id': "next.js"},
423
- 'AngularJS Agent': {'mode': True, 'id': "angularjs"},
424
- 'Swift Agent': {'mode': True, 'id': "swift"},
425
- 'MongoDB Agent': {'mode': True, 'id': "mongodb"},
426
- 'PyTorch Agent': {'mode': True, 'id': "pytorch"},
427
- 'Xcode Agent': {'mode': True, 'id': "xcode"},
428
- 'Azure Agent': {'mode': True, 'id': "azure"},
429
- 'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
430
- 'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
431
- 'Docker Agent': {'mode': True, 'id': "docker"},
432
- 'Electron Agent': {'mode': True, 'id': "electron"},
433
- 'Erlang Agent': {'mode': True, 'id': "erlang"},
434
- 'FastAPI Agent': {'mode': True, 'id': "fastapi"},
435
- 'Firebase Agent': {'mode': True, 'id': "firebase"},
436
- 'Flask Agent': {'mode': True, 'id': "flask"},
437
- 'Git Agent': {'mode': True, 'id': "git"},
438
- 'Gitlab Agent': {'mode': True, 'id': "gitlab"},
439
- 'Go Agent': {'mode': True, 'id': "go"},
440
- 'Godot Agent': {'mode': True, 'id': "godot"},
441
- 'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
442
- 'Heroku Agent': {'mode': True, 'id': "heroku"},
443
- }
444
-
445
- # Create a list of all model aliases
446
- _all_model_aliases = list(dict.fromkeys([
447
- # Add all model aliases
448
- "gpt-4", "gpt-4.1", "gpt-4o", "gpt-4o-mini",
449
- "claude-3.7-sonnet", "claude-3.5-sonnet",
450
- "deepcoder-14b", "deephermes-3-8b", "deepseek-r1-zero", "deepseek-r1",
451
- "dolphin-3.0-24b", "dolphin-3.0-r1-24b", "reka-flash", "gemini-2.0-flash",
452
- "gemma-2-9b", "gemma-3-12b", "gemma-3-1b", "gemma-3-27b", "gemma-3-4b",
453
- "kimi-vl-a3b-thinking", "llama-3.1-8b", "nemotron-253b", "llama-3.2-11b",
454
- "llama-3.2-1b", "llama-3.2-3b", "llama-3.3-70b", "nemotron-49b",
455
- "llama-4-maverick", "llama-4-scout", "mistral-7b", "mistral-nemo",
456
- "mistral-small-24b", "mistral-small-24b-instruct-2501", "mistral-small-3.1-24b",
457
- "molmo-7b", "moonlight-16b", "qwen-2.5-72b", "qwen-2.5-7b", "qwen-2.5-coder-32b",
458
- "qwen-2.5-vl-32b", "qwen-2.5-vl-3b", "qwen-2.5-vl-72b", "qwen-2.5-vl-7b",
459
- "qwerky-72b", "qwq-32b", "qwq-32b-preview", "qwq-32b-arliai",
460
- "deepseek-r1-distill-llama-70b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b",
461
- # Add base models
462
- "o3-mini", "gpt-4.1-nano"
463
- ]))
464
-
465
- # Create AVAILABLE_MODELS as a list with the format "BLACKBOXAI/model"
466
- AVAILABLE_MODELS = [f"BLACKBOXAI/{name}" for name in _all_model_aliases]
467
-
468
- # Create a mapping dictionary for internal use
469
- _model_mapping = {name: f"BLACKBOXAI/{name}" for name in _all_model_aliases}
470
-
471
-
472
    # Maps user-facing alias names (lowercase, dash-separated) to the canonical
    # model names used by the BlackboxAI backend. Consulted case-insensitively
    # by get_model().
    model_aliases = {
        "gpt-4": default_model,  # default_model is "GPT-4.1"
        "gpt-4.1": default_model,
        "gpt-4o": default_model,  # Defaulting to GPT-4.1 as per previous logic if specific GPT-4o handling isn't defined elsewhere
        "gpt-4o-mini": default_model,  # Defaulting
        "claude-3.7-sonnet": "Claude-sonnet-3.7",
        "claude-3.5-sonnet": "Claude-sonnet-3.5",
        # "deepseek-r1": "DeepSeek-R1", # This is in base models, maps to R1 or DeepSeek R1 Zero in agentMode
        "deepcoder-14b": "Deepcoder 14B Preview",
        "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
        "deepseek-r1-zero": "DeepSeek R1 Zero",
        "deepseek-r1": "R1",  # Alias for R1 (which is deepseek/deepseek-r1:free)
        "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
        "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
        "reka-flash": "Flash 3",
        "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
        "gemma-2-9b": "Gemma 2 9B",
        "gemma-3-12b": "Gemma 3 12B",
        "gemma-3-1b": "Gemma 3 1B",
        "gemma-3-27b": "Gemma 3 27B",
        "gemma-3-4b": "Gemma 3 4B",
        "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
        "llama-3.1-8b": "Llama 3.1 8B Instruct",
        "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
        "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
        "llama-3.2-1b": "Llama 3.2 1B Instruct",
        "llama-3.2-3b": "Llama 3.2 3B Instruct",
        "llama-3.3-70b": "Llama 3.3 70B Instruct",
        "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
        "llama-4-maverick": "Llama 4 Maverick",
        "llama-4-scout": "Llama 4 Scout",
        "mistral-7b": "Mistral 7B Instruct",
        "mistral-nemo": "Mistral Nemo",
        "mistral-small-24b": "Mistral Small 3",  # Alias for "Mistral Small 3"
        "mistral-small-24b-instruct-2501": "Mistral-Small-24B-Instruct-2501",  # Specific name
        "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
        "molmo-7b": "Molmo 7B D",
        "moonlight-16b": "Moonlight 16B A3B Instruct",
        "qwen-2.5-72b": "Qwen2.5 72B Instruct",
        "qwen-2.5-7b": "Qwen2.5 7B Instruct",
        "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
        "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
        "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
        "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
        "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
        "qwerky-72b": "Qwerky 72B",
        "qwq-32b": "QwQ 32B",
        "qwq-32b-preview": "QwQ 32B Preview",
        "qwq-32b-arliai": "QwQ 32B RpR v1",
        "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
        "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
        "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
    }
527
-
528
    def __init__(
        self,
        # NOTE(review): mutable default argument — the same dict object is
        # shared by every instance constructed without an argument.
        proxies: dict = {}
    ):
        """
        Initialize the BlackboxAI provider with OpenAI compatibility.

        Sets up a persistent HTTP session, browser-like headers, randomized
        cookie values, and the OpenAI-style ``chat.completions`` interface.

        Args:
            proxies: Optional proxy configuration (requests-style mapping).
        """
        # Persistent session reused for all API requests.
        self.session = requests.Session()

        # Browser-like headers (static Edge/Chromium fingerprint).
        self.headers = {
            'Accept': 'text/event-stream',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'DNT': '1',
            'Origin': 'https://www.blackbox.ai',
            'Referer': 'https://www.blackbox.ai/',
            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            'Sec-CH-UA-Mobile': '?0',
            'Sec-CH-UA-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
        }

        # Random cookie values in the shape the site expects (Cloudflare /
        # analytics style identifiers generated locally, not issued by a server).
        self.cookies = {
            'cfzs_amplitude': self.generate_id(32),
            'cfz_amplitude': self.generate_id(32),
            '__cf_bm': self.generate_id(32),
        }

        # Apply the caller-supplied proxy configuration to the session.
        self.session.proxies = proxies

        # OpenAI-style entry point: client.chat.completions.create(...).
        self.chat = Chat(self)
571
-
572
- @property
573
- def models(self):
574
- class _ModelList:
575
- def list(inner_self):
576
- return type(self).AVAILABLE_MODELS
577
- return _ModelList()
578
-
579
-
580
- @classmethod
581
- def get_model(cls, model: str) -> str:
582
- """Resolve model name from alias."""
583
- # Remove BLACKBOXAI/ prefix if present
584
- if model.startswith("BLACKBOXAI/"):
585
- model = model[len("BLACKBOXAI/"):]
586
-
587
- # Convert to lowercase for case-insensitive matching
588
- model_lower = model.lower()
589
-
590
- # Check aliases (case-insensitive)
591
- for alias, target in cls.model_aliases.items():
592
- if model_lower == alias.lower():
593
- return target
594
-
595
- # If the model is directly in available models (without the prefix), return it
596
- for available_model in cls._all_model_aliases:
597
- if model_lower == available_model.lower():
598
- # Find the corresponding model in model_aliases or use the model directly
599
- for alias, target in cls.model_aliases.items():
600
- if available_model.lower() == alias.lower():
601
- return target
602
- return available_model
603
-
604
- # If we get here, use the default model
605
- return cls.default_model
606
-
607
- @classmethod
608
- def generate_random_string(cls, length: int = 8) -> str:
609
- """Generate a random string of specified length."""
610
- chars = string.ascii_lowercase + string.digits
611
- return ''.join(random.choice(chars) for _ in range(length))
612
-
613
- @classmethod
614
- def generate_id(cls, length: int = 7) -> str:
615
- """Generate a random ID of specified length."""
616
- chars = string.ascii_letters + string.digits
617
- return ''.join(random.choice(chars) for _ in range(length))
618
-
619
- @classmethod
620
- def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
621
- """
622
- Generate a dynamic session with proper ID and expiry format using a specific email.
623
-
624
- Args:
625
- email: The email to use for this session
626
- id_length: Length of the numeric ID (default: 21)
627
- days_ahead: Number of days ahead for expiry (default: 30)
628
-
629
- Returns:
630
- dict: A session dictionary with user information and expiry
631
- """
632
- # Generate a random name
633
- first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
634
- last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
635
- name = f"{random.choice(first_names)} {random.choice(last_names)}"
636
-
637
- # Generate numeric ID - using Google-like ID format
638
- numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
639
-
640
- # Generate future expiry date
641
- future_date = datetime.now() + timedelta(days=days_ahead)
642
- expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
643
-
644
- # Generate random image ID for the new URL format
645
- chars = string.ascii_letters + string.digits + "-"
646
- random_img_id = ''.join(random.choice(chars) for _ in range(48))
647
- image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
648
-
649
- return {
650
- "user": {
651
- "name": name,
652
- "email": email,
653
- "image": image_url,
654
- "id": numeric_id
655
- },
656
- "expires": expiry,
657
- "isNewUser": False
658
- }
659
-
660
- def create_request_payload(
661
- self,
662
- messages: List[Dict[str, Any]],
663
- chat_id: str,
664
- system_message: str,
665
- max_tokens: int,
666
- temperature: Optional[float] = None,
667
- top_p: Optional[float] = None,
668
- session_data: Dict[str, Any] = None,
669
- model: str = None
670
- ) -> Dict[str, Any]:
671
- """Create the full request payload for the BlackboxAI API."""
672
- # Get the correct model ID and agent mode
673
- model_name = self.get_model(model or self.default_model)
674
- agent_mode = self.agentMode.get(model_name, {})
675
-
676
- # Generate a random customer ID for the subscription
677
- customer_id = "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14))
678
-
679
- # Create the full request payload
680
- return {
681
- "messages": messages,
682
- "agentMode": agent_mode,
683
- "id": chat_id,
684
- "previewToken": None,
685
- "userId": None,
686
- "codeModelMode": True,
687
- "trendingAgentMode": {},
688
- "isMicMode": False,
689
- "userSystemPrompt": system_message,
690
- "maxTokens": max_tokens,
691
- "playgroundTopP": top_p,
692
- "playgroundTemperature": temperature,
693
- "isChromeExt": False,
694
- "githubToken": "",
695
- "clickedAnswer2": False,
696
- "clickedAnswer3": False,
697
- "clickedForceWebSearch": False,
698
- "visitFromDelta": False,
699
- "isMemoryEnabled": False,
700
- "mobileClient": False,
701
- "userSelectedModel": model_name if model_name in self.userSelectedModel else None,
702
- "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
703
- "imageGenerationMode": False,
704
- "webSearchModePrompt": False,
705
- "deepSearchMode": False,
706
- "designerMode": False,
707
- "domains": None,
708
- "vscodeClient": False,
709
- "codeInterpreterMode": False,
710
- "customProfile": {
711
- "name": "",
712
- "occupation": "",
713
- "traits": [],
714
- "additionalInfo": "",
715
- "enableNewChats": False
716
- },
717
- "webSearchModeOption": {
718
- "autoMode": True,
719
- "webMode": False,
720
- "offlineMode": False
721
- },
722
- "session": session_data,
723
- "isPremium": True,
724
- "subscriptionCache": {
725
- "status": "PREMIUM",
726
- "customerId": customer_id,
727
- "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
728
- "lastChecked": int(datetime.now().timestamp() * 1000),
729
- "isTrialSubscription": True
730
- },
731
- "beastMode": False,
732
- "reasoningMode": False,
733
- "designerMode": False,
734
- "workspaceId": ""
735
- }
1
+ import requests
2
+ import random
3
+ import string
4
+ import base64
5
+ from datetime import datetime, timedelta
6
+ from typing import Generator, List, Dict, Optional, Any, Union
7
+ import json # Not used directly in this snippet, but often useful
8
+ import uuid
9
+ import time
10
+
11
+ # Import base classes and utility structures
12
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
+ from webscout.Provider.OPENAI.utils import (
14
+ ChatCompletion, Choice,
15
+ ChatCompletionMessage, CompletionUsage, count_tokens,
16
+ ChatCompletionChunk, ChoiceDelta # Added for streaming return type
17
+ )
18
+ from webscout.litagent import LitAgent, agent
19
+ agent = LitAgent()
20
+
21
def to_data_uri(image_data):
    """Convert raw image bytes (or an existing data URI string) to a data URI.

    Args:
        image_data: Either a string (assumed to already be a data URI / URL
            and returned unchanged) or a bytes-like object with raw image data.

    Returns:
        str: A ``data:<mime>;base64,<payload>`` string.
    """
    if isinstance(image_data, str):
        # Assume it's already a data URI — pass through untouched.
        return image_data

    # Accept any bytes-like object (bytes, bytearray, memoryview).
    raw = bytes(image_data)

    # Encode binary data to base64.
    encoded = base64.b64encode(raw).decode('utf-8')

    # Sniff the MIME type from well-known magic numbers; default to JPEG.
    mime_type = "image/jpeg"
    if raw.startswith(b'\x89PNG'):
        mime_type = "image/png"
    elif raw.startswith(b'\xff\xd8'):
        mime_type = "image/jpeg"
    elif raw.startswith(b'GIF'):
        mime_type = "image/gif"
    elif raw.startswith(b'RIFF') and raw[8:12] == b'WEBP':
        # Generalization: WebP input was previously mislabeled as JPEG.
        mime_type = "image/webp"

    return f"data:{mime_type};base64,{encoded}"
40
+
41
+
42
class Completions(BaseCompletions):
    """OpenAI-compatible ``completions`` endpoint backed by the BlackboxAI chat API."""

    def __init__(self, client: 'BLACKBOXAI'):
        # Back-reference to the owning client for session/headers/payload helpers.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, Any]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with BlackboxAI API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate
            stream: If True, yields streaming chunks
            temperature: Sampling temperature (0-1)
            top_p: Nucleus sampling parameter (0-1)
            **kwargs: Additional parameters to pass to the API (currently ignored)

        Returns:
            Returns a ChatCompletion object or a generator for streaming
        """
        # Generate request ID and timestamp shared by all chunks of this call.
        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        # Extract the first system message if present; it is removed from the
        # forwarded message list and sent in a dedicated payload field instead.
        system_message = "You are a helpful AI assistant."
        for msg in messages:
            if msg.get("role") == "system":
                system_message = msg.get("content")
                break

        # Collect image attachments from user messages (OpenAI "image_url" parts).
        media = []
        for msg in messages:
            if msg.get("role") == "user":
                # Check for image attachments in structured content lists.
                content = msg.get("content", [])
                if isinstance(content, list):
                    for item in content:
                        if isinstance(item, dict) and item.get("type") == "image_url":
                            image_url = item.get("image_url", {})
                            if isinstance(image_url, dict) and "url" in image_url:
                                url = image_url["url"]
                                if url.startswith("data:"):
                                    # Already a data URI — keep as-is.
                                    image_name = f"image_{len(media)}.png"
                                    media.append((url, image_name))
                                else:
                                    # Remote URL: fetch the bytes so they can be
                                    # inlined as a data URI in the payload.
                                    try:
                                        image_response = requests.get(url)
                                        if image_response.ok:
                                            image_name = f"image_{len(media)}.png"
                                            media.append((image_response.content, image_name))
                                    except Exception as e:
                                        # NOTE(review): fetch failures are silently
                                        # dropped; the request proceeds without
                                        # this image.
                                        pass

        # Dispatch to the streaming implementation when requested.
        if stream:
            return self._create_streaming(
                request_id=request_id,
                created_time=created_time,
                model=model,
                messages=messages,
                system_message=system_message,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                media=media
            )

        # Use non-streaming implementation
        return self._create_non_streaming(
            request_id=request_id,
            created_time=created_time,
            model=model,
            messages=messages,
            system_message=system_message,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            media=media
        )


    def _create_non_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        messages: List[Dict[str, Any]],
        system_message: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        media: List = None
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions."""
        try:
            # Prepare user messages for BlackboxAI API format: each message
            # carries an id; the first one reuses the request id.
            blackbox_messages = []
            for i, msg in enumerate(messages):
                if msg["role"] == "system":
                    continue  # System message handled separately

                msg_id = self._client.generate_id() if i > 0 else request_id
                blackbox_messages.append({
                    "id": msg_id,
                    "content": msg["content"],
                    "role": msg["role"]
                })

            # Attach image data (as data URIs) to the last forwarded message.
            if media and blackbox_messages:
                blackbox_messages[-1]['data'] = {
                    "imagesData": [
                        {
                            "filePath": f"/",
                            "contents": to_data_uri(image[0])
                        } for image in media
                    ],
                    "fileText": "",
                    "title": ""
                }

            # Build a throwaway session with a random @blackbox.ai email.
            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
            session_data = self._client.generate_session(request_email)

            # Create the API request payload
            payload = self._client.create_request_payload(
                messages=blackbox_messages,
                chat_id=request_id,
                system_message=system_message,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                session_data=session_data,
                model=model
            )

            # Make the API request with cookies
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                headers=self._client.headers,
                cookies=self._client.cookies,
                timeout=self._client.timeout
            )

            # Process the response
            full_content = ""
            if response.status_code == 200:
                # Extract content from response text
                response_text = response.text

                # Handle possible SSE format in response: strip "data: " frames
                # and filter out known backend error strings.
                if "data: " in response_text:
                    content_lines = []
                    for line in response_text.split('\n'):
                        if line.startswith("data: "):
                            line = line[6:].strip()
                            if line and not any(error_msg in line.lower() for error_msg in [
                                "service has been suspended",
                                "api request failed",
                                "you have reached your request limit"
                            ]):
                                content_lines.append(line)
                    full_content = "".join(content_lines)
                else:
                    # Regular (plain-text) response body.
                    full_content = response_text
            else:
                # Handle error response
                raise IOError(f"BlackboxAI request failed with status code {response.status_code}")

            # Create the completion message
            message = ChatCompletionMessage(
                role="assistant",
                content=full_content
            )

            # Create the choice with the message
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Estimate token usage using count_tokens (local approximation;
            # the backend does not report usage).
            prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
            completion_tokens = count_tokens(full_content)

            # Create the final completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=CompletionUsage(
                    prompt_tokens=prompt_tokens,
                    completion_tokens=completion_tokens,
                    total_tokens=prompt_tokens + completion_tokens
                )
            )

            return completion

        except Exception as e:
            raise IOError(f"BlackboxAI request failed: {str(e)}") from e

    def _create_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        messages: List[Dict[str, Any]],
        system_message: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        media: List = None
    ):
        """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
        # Prepare user messages for BlackboxAI API format (same shape as the
        # non-streaming path).
        blackbox_messages = []
        for i, msg in enumerate(messages):
            if msg["role"] == "system":
                continue  # System message handled separately
            msg_id = self._client.generate_id() if i > 0 else request_id
            blackbox_messages.append({
                "id": msg_id,
                "content": msg["content"],
                "role": msg["role"]
            })
        # Attach image data (as data URIs) to the last forwarded message.
        if media and blackbox_messages:
            blackbox_messages[-1]['data'] = {
                "imagesData": [
                    {
                        "filePath": f"/",
                        "contents": to_data_uri(image[0])
                    } for image in media
                ],
                "fileText": "",
                "title": ""
            }
        # Build a throwaway session with a random @blackbox.ai email.
        request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
        session_data = self._client.generate_session(request_email)
        payload = self._client.create_request_payload(
            messages=blackbox_messages,
            chat_id=request_id,
            system_message=system_message,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            session_data=session_data,
            model=model
        )
        # Make the API request with cookies, stream=True
        response = self._client.session.post(
            self._client.api_endpoint,
            json=payload,
            headers=self._client.headers,
            cookies=self._client.cookies,
            stream=True,
            timeout=self._client.timeout
        )
        # Blackbox streams as raw text with no line breaks, so re-chunk manually
        # into fixed-size pieces before wrapping them as OpenAI chunks.
        buffer = ""
        chunk_size = 32  # Tune as needed for smoothness
        from webscout.Provider.OPENAI.utils import ChatCompletionChunk, Choice, ChoiceDelta
        for chunk in response.iter_content(chunk_size=chunk_size):
            if not chunk:
                continue
            # NOTE(review): decode(errors="ignore") may drop bytes of a multi-byte
            # UTF-8 character split across chunk boundaries.
            text = chunk.decode(errors="ignore")
            buffer += text
            # Yield in small pieces, but only non-empty ones.
            while len(buffer) >= chunk_size:
                out = buffer[:chunk_size]
                buffer = buffer[chunk_size:]
                if out.strip():
                    # Wrap the chunk in OpenAI-compatible structure
                    delta = ChoiceDelta(content=out, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk_obj = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                        system_fingerprint=None
                    )
                    yield chunk_obj
        # Yield any remaining buffer
        if buffer.strip():
            delta = ChoiceDelta(content=buffer, role="assistant")
            choice = Choice(index=0, delta=delta, finish_reason=None)
            chunk_obj = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )
            yield chunk_obj
362
+
363
class Chat(BaseChat):
    """Namespace object mirroring ``openai.Client.chat``."""

    def __init__(self, client: 'BLACKBOXAI'):
        # Only the completions endpoint is exposed.
        self.completions = Completions(client)
366
+
367
+
368
class BLACKBOXAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for BlackboxAI API.

    Usage:
        client = BLACKBOXAI()
        response = client.chat.completions.create(
            model="GPT-4.1",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """
    # Default model.
    # Fix: `default_model` and `default_vision_model` were previously assigned
    # twice with identical values; the redundant second assignments are removed.
    default_model = "GPT-4.1"
    default_vision_model = default_model
    # BlackboxAI chat endpoint and request timeout (seconds).
    api_endpoint = "https://www.blackbox.ai/api/chat"
    timeout = 30
390
+
391
    # OpenRouter-backed model display names (each maps to a ":free" OpenRouter
    # id in agentMode below).
    openrouter_models = [
        "Deepcoder 14B Preview",
        "DeepHermes 3 Llama 3 8B Preview",
        "DeepSeek R1 Zero",
        "Dolphin3.0 Mistral 24B",
        "Dolphin3.0 R1 Mistral 24B",
        "Flash 3",
        "Gemini 2.0 Flash Experimental",
        "Gemma 2 9B",
        "Gemma 3 12B",
        "Gemma 3 1B",
        "Gemma 3 27B",
        "Gemma 3 4B",
        "Kimi VL A3B Thinking",
        "Llama 3.1 8B Instruct",
        "Llama 3.1 Nemotron Ultra 253B v1",
        "Llama 3.2 11B Vision Instruct",
        "Llama 3.2 1B Instruct",
        "Llama 3.2 3B Instruct",
        "Llama 3.3 70B Instruct",
        "Llama 3.3 Nemotron Super 49B v1",
        "Llama 4 Maverick",
        "Llama 4 Scout",
        "Mistral 7B Instruct",
        "Mistral Nemo",
        "Mistral Small 3",
        "Mistral Small 3.1 24B",
        "Molmo 7B D",
        "Moonlight 16B A3B Instruct",
        "Qwen2.5 72B Instruct",
        "Qwen2.5 7B Instruct",
        "Qwen2.5 Coder 32B Instruct",
        "Qwen2.5 VL 32B Instruct",
        "Qwen2.5 VL 3B Instruct",
        "Qwen2.5 VL 72B Instruct",
        "Qwen2.5-VL 7B Instruct",
        "Qwerky 72B",
        "QwQ 32B",
        "QwQ 32B Preview",
        "QwQ 32B RpR v1",
        "R1",
        "R1 Distill Llama 70B",
        "R1 Distill Qwen 14B",
        "R1 Distill Qwen 32B",
    ]

    # Full model catalogue: base models + OpenRouter models + agent modes.
    # NOTE(review): this class attribute is shadowed later by the `models`
    # property; AVAILABLE_MODELS below snapshots this list at class-creation
    # time, before the property definition overwrites the name.
    models = [
        default_model,
        "o3-mini",
        "gpt-4.1-nano",
        "Claude Opus 4",  # Added Claude Opus 4
        "Claude Sonnet 4",  # Added Claude Sonnet 4
        "Claude-sonnet-3.7",
        "Claude-sonnet-3.5",
        "Grok 3",  # Added Grok 3
        "Gemini 2.5 Pro",  # Added Gemini 2.5 Pro
        "UI-TARS 72B",  # Added UI-TARS 72B
        "DeepSeek-R1",
        "Mistral-Small-24B-Instruct-2501",
        *openrouter_models,
        # Trending agent modes (names)
        'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
        'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
        'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
        'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
        'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
        'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
    ]

    # Models that support vision capabilities
    vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct", "Gemini 2.5 Pro", "Claude Sonnet 4", "Claude Opus 4", "UI-TARS 72B"]  # Added Llama vision, Gemini 2.5 Pro, Claude Sonnet 4, Claude Opus 4, and UI-TARS 72B

    # Models that can be directly selected by users (sent as userSelectedModel
    # in the request payload; others are sent with userSelectedModel=None).
    userSelectedModel = ['o3-mini', 'Claude Opus 4', 'Claude Sonnet 4', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'Grok 3', 'Gemini 2.5 Pro', 'UI-TARS 72B', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models

    # Agent mode configurations: display name -> payload dict the backend
    # expects in the "agentMode" field.
    agentMode = {
        # OpenRouter Free
        'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
        'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
        'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
        'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
        'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
        'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
        'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
        'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
        'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
        'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
        'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
        'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
        'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
        'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
        'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
        'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
        'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
        'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
        'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
        'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
        'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
        'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
        'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
        'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
        'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},  # Matches Mistral-Small-24B-Instruct-2501
        'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
        'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
        'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
        'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
        'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
        'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
        'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
        'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
        'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
        'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
        'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
        'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
        'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
        'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
        'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},  # Matches DeepSeek-R1
        'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
        'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
        'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
        # Default models from the new list
        'Claude Opus 4': {'mode': True, 'id': "anthropic/claude-opus-4", 'name': "Claude Opus 4"},
        'Claude Sonnet 4': {'mode': True, 'id': "anthropic/claude-sonnet-4", 'name': "Claude Sonnet 4"},
        'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
        'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
        'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
        'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
        'UI-TARS 72B': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B"},
        'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},  # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
        'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
        # Add default_model if it's not covered and has an agent mode
        default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model},  # Assuming GPT-4.1 is agent-compatible
        'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"},  # Assuming o3-mini is agent-compatible
        'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"},  # Assuming gpt-4.1-nano is agent-compatible
    }

    # Trending agent modes (framework/tool-specific agents; names appear in
    # `models` above but are sent via trendingAgentMode, not agentMode).
    trendingAgentMode = {
        'Python Agent': {'mode': True, 'id': "python"},
        'HTML Agent': {'mode': True, 'id': "html"},
        'Builder Agent': {'mode': True, 'id': "builder"},
        'Java Agent': {'mode': True, 'id': "java"},
        'JavaScript Agent': {'mode': True, 'id': "javascript"},
        'React Agent': {'mode': True, 'id': "react"},
        'Android Agent': {'mode': True, 'id': "android"},
        'Flutter Agent': {'mode': True, 'id': "flutter"},
        'Next.js Agent': {'mode': True, 'id': "next.js"},
        'AngularJS Agent': {'mode': True, 'id': "angularjs"},
        'Swift Agent': {'mode': True, 'id': "swift"},
        'MongoDB Agent': {'mode': True, 'id': "mongodb"},
        'PyTorch Agent': {'mode': True, 'id': "pytorch"},
        'Xcode Agent': {'mode': True, 'id': "xcode"},
        'Azure Agent': {'mode': True, 'id': "azure"},
        'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
        'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
        'Docker Agent': {'mode': True, 'id': "docker"},
        'Electron Agent': {'mode': True, 'id': "electron"},
        'Erlang Agent': {'mode': True, 'id': "erlang"},
        'FastAPI Agent': {'mode': True, 'id': "fastapi"},
        'Firebase Agent': {'mode': True, 'id': "firebase"},
        'Flask Agent': {'mode': True, 'id': "flask"},
        'Git Agent': {'mode': True, 'id': "git"},
        'Gitlab Agent': {'mode': True, 'id': "gitlab"},
        'Go Agent': {'mode': True, 'id': "go"},
        'Godot Agent': {'mode': True, 'id': "godot"},
        'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
        'Heroku Agent': {'mode': True, 'id': "heroku"},
    }

    # Snapshot of the model catalogue (no "BLACKBOXAI/" prefix in this version).
    AVAILABLE_MODELS = list(models)
565
+
566
+
567
+ def __init__(
568
+ self,
569
+ proxies: dict = {}
570
+ ):
571
+ """
572
+ Initialize the BlackboxAI provider with OpenAI compatibility.
573
+
574
+ Args:
575
+ proxies: Optional proxy configuration
576
+ """
577
+ # Initialize session
578
+ self.session = requests.Session()
579
+
580
+ # Set headers based on GitHub reference
581
+ self.headers = {
582
+ 'Accept': 'text/event-stream',
583
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
584
+ 'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
585
+ 'Content-Type': 'application/json',
586
+ 'DNT': '1',
587
+ 'Origin': 'https://www.blackbox.ai',
588
+ 'Referer': 'https://www.blackbox.ai/',
589
+ 'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
590
+ 'Sec-CH-UA-Mobile': '?0',
591
+ 'Sec-CH-UA-Platform': '"Windows"',
592
+ 'Sec-Fetch-Dest': 'empty',
593
+ 'Sec-Fetch-Mode': 'cors',
594
+ 'Sec-Fetch-Site': 'same-origin',
595
+ 'User-Agent': agent.random(),
596
+ }
597
+
598
+ # Set cookies for the session
599
+ self.cookies = {
600
+ 'cfzs_amplitude': self.generate_id(32),
601
+ 'cfz_amplitude': self.generate_id(32),
602
+ '__cf_bm': self.generate_id(32),
603
+ }
604
+
605
+ # Set proxies if provided
606
+ self.session.proxies = proxies
607
+
608
+ # Initialize chat interface with completions
609
+ self.chat = Chat(self)
610
+
611
+ @property
612
+ def models(self):
613
+ class _ModelList:
614
+ def list(inner_self):
615
+ return type(self).AVAILABLE_MODELS
616
+ return _ModelList()
617
+
618
+
619
+ @classmethod
620
+ def get_model(cls, model: str) -> str:
621
+ """Return the model name, removing BLACKBOXAI/ prefix if present, or default_model."""
622
+ if model.startswith("BLACKBOXAI/"):
623
+ model = model[len("BLACKBOXAI/"):]
624
+ if model in cls.AVAILABLE_MODELS:
625
+ return model
626
+ return cls.default_model
627
+
628
+ @classmethod
629
+ def generate_random_string(cls, length: int = 8) -> str:
630
+ """Generate a random string of specified length."""
631
+ chars = string.ascii_lowercase + string.digits
632
+ return ''.join(random.choice(chars) for _ in range(length))
633
+
634
+ @classmethod
635
+ def generate_id(cls, length: int = 7) -> str:
636
+ """Generate a random ID of specified length."""
637
+ chars = string.ascii_letters + string.digits
638
+ return ''.join(random.choice(chars) for _ in range(length))
639
+
640
+ @classmethod
641
+ def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
642
+ """
643
+ Generate a dynamic session with proper ID and expiry format using a specific email.
644
+
645
+ Args:
646
+ email: The email to use for this session
647
+ id_length: Length of the numeric ID (default: 21)
648
+ days_ahead: Number of days ahead for expiry (default: 30)
649
+
650
+ Returns:
651
+ dict: A session dictionary with user information and expiry
652
+ """
653
+ # Generate a random name
654
+ first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
655
+ last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
656
+ name = f"{random.choice(first_names)} {random.choice(last_names)}"
657
+
658
+ # Generate numeric ID - using Google-like ID format
659
+ numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
660
+
661
+ # Generate future expiry date
662
+ future_date = datetime.now() + timedelta(days=days_ahead)
663
+ expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
664
+
665
+ # Generate random image ID for the new URL format
666
+ chars = string.ascii_letters + string.digits + "-"
667
+ random_img_id = ''.join(random.choice(chars) for _ in range(48))
668
+ image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
669
+
670
+ return {
671
+ "user": {
672
+ "name": name,
673
+ "email": email,
674
+ "image": image_url,
675
+ "id": numeric_id
676
+ },
677
+ "expires": expiry,
678
+ "isNewUser": False
679
+ }
680
+
681
+ def create_request_payload(
682
+ self,
683
+ messages: List[Dict[str, Any]],
684
+ chat_id: str,
685
+ system_message: str,
686
+ max_tokens: int,
687
+ temperature: Optional[float] = None,
688
+ top_p: Optional[float] = None,
689
+ session_data: Dict[str, Any] = None,
690
+ model: str = None
691
+ ) -> Dict[str, Any]:
692
+ """Create the full request payload for the BlackboxAI API."""
693
+ # Get the correct model ID and agent mode
694
+ model_name = self.get_model(model or self.default_model)
695
+ agent_mode = self.agentMode.get(model_name, {})
696
+
697
+ # Generate a random customer ID for the subscription
698
+ customer_id = "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14))
699
+
700
+ # Create the full request payload
701
+ return {
702
+ "messages": messages,
703
+ "agentMode": agent_mode,
704
+ "id": chat_id,
705
+ "previewToken": None,
706
+ "userId": None,
707
+ "codeModelMode": True,
708
+ "trendingAgentMode": {},
709
+ "isMicMode": False,
710
+ "userSystemPrompt": system_message,
711
+ "maxTokens": max_tokens,
712
+ "playgroundTopP": top_p,
713
+ "playgroundTemperature": temperature,
714
+ "isChromeExt": False,
715
+ "githubToken": "",
716
+ "clickedAnswer2": False,
717
+ "clickedAnswer3": False,
718
+ "clickedForceWebSearch": False,
719
+ "visitFromDelta": False,
720
+ "isMemoryEnabled": False,
721
+ "mobileClient": False,
722
+ "userSelectedModel": model_name if model_name in self.userSelectedModel else None,
723
+ "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
724
+ "imageGenerationMode": False,
725
+ "webSearchModePrompt": False,
726
+ "deepSearchMode": False,
727
+ "designerMode": False,
728
+ "domains": None,
729
+ "vscodeClient": False,
730
+ "codeInterpreterMode": False,
731
+ "customProfile": {
732
+ "name": "",
733
+ "occupation": "",
734
+ "traits": [],
735
+ "additionalInfo": "",
736
+ "enableNewChats": False
737
+ },
738
+ "webSearchModeOption": {
739
+ "autoMode": True,
740
+ "webMode": False,
741
+ "offlineMode": False
742
+ },
743
+ "session": session_data,
744
+ "isPremium": True,
745
+ "subscriptionCache": {
746
+ "status": "PREMIUM",
747
+ "customerId": customer_id,
748
+ "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
749
+ "lastChecked": int(datetime.now().timestamp() * 1000),
750
+ "isTrialSubscription": True
751
+ },
752
+ "beastMode": False,
753
+ "reasoningMode": False,
754
+ "designerMode": False,
755
+ "workspaceId": ""
756
+ }
757
if __name__ == "__main__":
    # Smoke test: stream a completion and echo each delta to stdout.
    # NOTE(review): a streamed delta's .content may be None on some chunks,
    # which would print the literal "None" — confirm against the provider.
    ai = BLACKBOXAI()
    stream = ai.chat.completions.create(
        model="GPT-4.1",
        messages=[{"role": "user", "content": "Tell me about india in points"}],
        stream=True,
    )
    for part in stream:
        print(part.choices[0].delta.content, end='', flush=True)