webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
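
Before auditing the per-file changes below, it can help to confirm which side of this diff an environment is actually on. A minimal sketch (not part of the package; it uses only the standard-library importlib.metadata to query the installed distribution):

from importlib.metadata import PackageNotFoundError, version

try:
    installed = version("webscout")  # distribution name as published on the registry
except PackageNotFoundError:
    installed = None  # webscout is not installed in this environment

print(f"installed webscout: {installed}")
print("covered by this diff:", installed in {"8.3.6", "8.3.7"})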
@@ -1,1045 +0,0 @@
- # from pickle import NONE
- import requests
- import random
- import string
- import base64
- from datetime import datetime, timedelta
- from typing import Generator, List, Dict, Optional, Any, Union
- import uuid
- import time
- import codecs
- import gzip
- import zstandard as zstd
- import brotli
- import zlib
- from webscout.Provider.OPENAI.utils import (
-     ChatCompletion, Choice,
-     ChatCompletionMessage, CompletionUsage, count_tokens,
-     ChatCompletionChunk, ChoiceDelta # Ensure ChoiceDelta is always imported at the top
- )
- try:
-     from webscout.litagent import LitAgent
-     agent = LitAgent()
- except ImportError:
-     print("Warning: LitAgent not available, using default user agent")
-     class MockAgent:
-         def random(self):
-             return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
-     agent = MockAgent()
-
- from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-
- class StreamingDecompressor:
-     """
-     A streaming decompressor that can handle partial compressed data in real-time.
-     This allows for true streaming decompression without buffering entire response.
-     """
-     def __init__(self, content_encoding: str):
-         self.encoding = content_encoding.lower().strip() if content_encoding else None
-         self.decompressor = None
-         self.text_decoder = codecs.getincrementaldecoder("utf-8")("replace")
-         self.zstd_buffer = b"" # Buffer for zstd incomplete frames
-
-         if self.encoding == 'gzip':
-             self.decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS) # gzip format
-         elif self.encoding == 'deflate':
-             self.decompressor = zlib.decompressobj() # deflate format
-         elif self.encoding == 'zstd':
-             self.decompressor = zstd.ZstdDecompressor()
-         elif self.encoding == 'br':
-             self.decompressor = brotli.Decompressor()
-
-     def decompress_chunk(self, chunk: bytes) -> str:
-         """
-         Decompress a chunk of data and return decoded text.
-         Handles partial compressed data properly for real-time streaming.
-         """
-         try:
-             if not chunk:
-                 return ""
-
-             if not self.encoding or self.encoding not in ['gzip', 'deflate', 'zstd', 'br']:
-                 # No compression or unsupported - decode directly
-                 return self.text_decoder.decode(chunk, final=False)
-
-             if self.encoding in ['gzip', 'deflate']:
-                 # Use zlib decompressor for gzip/deflate
-                 decompressed_data = self.decompressor.decompress(chunk)
-                 return self.text_decoder.decode(decompressed_data, final=False)
-
-             elif self.encoding == 'zstd':
-                 # Zstandard streaming decompression with buffering for incomplete frames
-                 self.zstd_buffer += chunk
-                 try:
-                     # Try to decompress the current buffer
-                     decompressed_data = self.decompressor.decompress(self.zstd_buffer)
-                     # If successful, clear the buffer and return decoded text
-                     self.zstd_buffer = b""
-                     return self.text_decoder.decode(decompressed_data, final=False)
-                 except zstd.ZstdError:
-                     # Frame is incomplete, keep buffering
-                     # Try to decompress any complete frames from buffer start
-                     try:
-                         # Process buffer in chunks to find complete frames
-                         buffer_len = len(self.zstd_buffer)
-                         if buffer_len > 4: # Minimum zstd frame size
-                             # Try smaller chunks of the buffer
-                             for end_pos in range(4, buffer_len + 1):
-                                 try:
-                                     partial_data = self.decompressor.decompress(self.zstd_buffer[:end_pos])
-                                     # If we got here, we found a complete frame
-                                     self.zstd_buffer = self.zstd_buffer[end_pos:]
-                                     return self.text_decoder.decode(partial_data, final=False)
-                                 except zstd.ZstdError:
-                                     continue
-                     except Exception:
-                         pass
-                     return ""
-
-             elif self.encoding == 'br':
-                 # Brotli streaming decompression
-                 try:
-                     decompressed_data = self.decompressor.decompress(chunk)
-                     return self.text_decoder.decode(decompressed_data, final=False)
-                 except brotli.error:
-                     # If brotli fails, it might need more data or be at end
-                     return ""
-
-         except Exception as e:
-             # If decompression fails, try to decode the chunk as-is (fallback)
-             try:
-                 return self.text_decoder.decode(chunk, final=False)
-             except UnicodeDecodeError:
-                 return ""
-
-     def finalize(self) -> str:
-         """
-         Finalize the decompression and return any remaining decoded text.
-         """
-         try:
-             remaining_text = ""
-
-             if self.encoding in ['gzip', 'deflate'] and self.decompressor:
-                 # Flush any remaining compressed data
-                 remaining_data = self.decompressor.flush()
-                 if remaining_data:
-                     remaining_text = self.text_decoder.decode(remaining_data, final=True)
-                 else:
-                     remaining_text = self.text_decoder.decode(b"", final=True)
-             elif self.encoding == 'zstd':
-                 # Process any remaining buffered data
-                 if self.zstd_buffer:
-                     try:
-                         remaining_data = self.decompressor.decompress(self.zstd_buffer)
-                         remaining_text = self.text_decoder.decode(remaining_data, final=True)
-                     except:
-                         # If buffered data can't be decompressed, finalize decoder
-                         remaining_text = self.text_decoder.decode(b"", final=True)
-                 else:
-                     remaining_text = self.text_decoder.decode(b"", final=True)
-             else:
-                 # Finalize the text decoder for other encodings
-                 remaining_text = self.text_decoder.decode(b"", final=True)
-
-             return remaining_text
-         except Exception:
-             # Ensure we always finalize the text decoder
-             try:
-                 return self.text_decoder.decode(b"", final=True)
-             except:
-                 return ""
-
- def decompress_response(response_content: bytes, content_encoding: str) -> str:
-     """
-     Decompress response content based on the Content-Encoding header.
-
-     Args:
-         response_content: The raw response content as bytes
-         content_encoding: The Content-Encoding header value
-
-     Returns:
-         str: The decompressed and decoded content as UTF-8 string
-
-     Raises:
-         IOError: If decompression fails
-     """
-     try:
-         if not content_encoding:
-             # No compression, decode directly
-             return response_content.decode('utf-8')
-
-         encoding = content_encoding.lower().strip()
-
-         if encoding == 'zstd':
-             # Decompress using zstandard
-             dctx = zstd.ZstdDecompressor()
-             decompressed_data = dctx.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'gzip':
-             # Decompress using gzip
-             decompressed_data = gzip.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'br':
-             # Decompress using brotli
-             decompressed_data = brotli.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'deflate':
-             # Decompress using zlib (deflate)
-             import zlib
-             decompressed_data = zlib.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         else:
-             # Unknown or unsupported encoding, try to decode as-is
-             return response_content.decode('utf-8')
-
-     except Exception as e:
-         raise IOError(f"Failed to decompress response with encoding '{content_encoding}': {str(e)}") from e
-
- def to_data_uri(image_data):
-     """Convert image data to a data URI format"""
-     if isinstance(image_data, str):
-         # Assume it's already a data URI
-         return image_data
-
-     # Encode binary data to base64
-     encoded = base64.b64encode(image_data).decode('utf-8')
-
-     # Determine MIME type (simplified)
-     mime_type = "image/jpeg" # Default
-     if image_data.startswith(b'\x89PNG'):
-         mime_type = "image/png"
-     elif image_data.startswith(b'\xff\xd8'):
-         mime_type = "image/jpeg"
-     elif image_data.startswith(b'GIF'):
-         mime_type = "image/gif"
-
-     return f"data:{mime_type};base64,{encoded}"
-
- def clean_text(text):
-     """Clean text by removing null bytes and control characters except newlines and tabs."""
-     import re
-     if not isinstance(text, str):
-         return text
-
-     # Remove null bytes
-     text = text.replace('\x00', '')
-
-     # Keep newlines, tabs, and other printable characters, remove other control chars
-     # This regex matches control characters except \n, \r, \t
-     return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
-
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'BLACKBOXAI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, Any]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Create a chat completion with BlackboxAI API.
-
-         Args:
-             model: The model to use (from AVAILABLE_MODELS)
-             messages: List of message dictionaries with 'role' and 'content'
-             max_tokens: Maximum number of tokens to generate
-             stream: If True, yields streaming chunks
-             temperature: Sampling temperature (0-1)
-             top_p: Nucleus sampling parameter (0-1)
-             **kwargs: Additional parameters to pass to the API
-
-         Returns:
-             Returns a ChatCompletion object or a generator for streaming
-         """
-         # Generate request ID and timestamp
-         request_id = str(uuid.uuid4())
-         created_time = int(time.time())
-
-         # Extract system message if present
-         system_message = "You are a helpful AI assistant."
-         for msg in messages:
-             if msg.get("role") == "system":
-                 system_message = msg.get("content")
-                 break
-
-         # Look for any image content
-         media = []
-         for msg in messages:
-             if msg.get("role") == "user":
-                 # Check for image attachments in content
-                 content = msg.get("content", [])
-                 if isinstance(content, list):
-                     for item in content:
-                         if isinstance(item, dict) and item.get("type") == "image_url":
-                             image_url = item.get("image_url", {})
-                             if isinstance(image_url, dict) and "url" in image_url:
-                                 url = image_url["url"]
-                                 if url.startswith("data:"):
-                                     # It's already a data URI
-                                     image_name = f"image_{len(media)}.png"
-                                     media.append((url, image_name))
-                                 else:
-                                     # Need to fetch and convert to data URI
-                                     try:
-                                         image_response = requests.get(url)
-                                         if image_response.ok:
-                                             image_name = f"image_{len(media)}.png"
-                                             media.append((image_response.content, image_name))
-                                     except Exception as e:
-                                         pass
-
-         # Dispatch to the streaming implementation if requested
-         if stream:
-             return self._create_streaming(
-                 request_id=request_id,
-                 created_time=created_time,
-                 model=model,
-                 messages=messages,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 media=media,
-                 timeout=timeout,
-                 proxies=proxies
-             )
-
-         # Use non-streaming implementation
-         return self._create_non_streaming(
-             request_id=request_id,
-             created_time=created_time,
-             model=model,
-             messages=messages,
-             system_message=system_message,
-             max_tokens=max_tokens,
-             temperature=temperature,
-             top_p=top_p,
-             media=media,
-             timeout=timeout,
-             proxies=proxies
-         )
-
-
-     def _create_non_streaming(
-         self,
-         *,
-         request_id: str,
-         created_time: int,
-         model: str,
-         messages: List[Dict[str, Any]],
-         system_message: str,
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         media: List = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None
-     ) -> ChatCompletion:
-         """Implementation for non-streaming chat completions."""
-         original_proxies = self._client.session.proxies.copy()
-         # Only use proxies if they are explicitly provided and not causing issues
-         if proxies is not None and proxies:
-             self._client.session.proxies.update(proxies)
-         else:
-             # Clear proxies to avoid connection issues
-             self._client.session.proxies = {}
-         try:
-             # Prepare user messages for BlackboxAI API format
-             blackbox_messages = []
-             for i, msg in enumerate(messages):
-                 if msg["role"] == "system":
-                     continue # System message handled separately
-
-                 msg_id = self._client.generate_id() if i > 0 else request_id
-                 blackbox_messages.append({
-                     "id": msg_id,
-                     "content": msg["content"],
-                     "role": msg["role"]
-                 })
-
-             # Add image data if provided
-             if media and blackbox_messages:
-                 blackbox_messages[-1]['data'] = {
-                     "imagesData": [
-                         {
-                             "filePath": f"/",
-                             "contents": to_data_uri(image[0])
-                         } for image in media
-                     ],
-                     "fileText": "",
-                     "title": ""
-                 }
-
-             # Generate request payload with session
-             request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
-             session_data = self._client.generate_session(request_email)
-
-             # Create the API request payload
-             payload = self._client.create_request_payload(
-                 messages=blackbox_messages,
-                 chat_id=request_id,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 session_data=session_data,
-                 model=model
-             )
-
-             # Make the API request with cookies and retry logic
-             max_retries = 3
-             for attempt in range(max_retries):
-                 try:
-                     response = self._client.session.post(
-                         self._client.api_endpoint,
-                         json=payload,
-                         headers=self._client.headers,
-                         cookies=self._client.cookies,
-                         timeout=timeout if timeout is not None else self._client.timeout
-                     )
-                     break # Success, exit retry loop
-                 except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
-                     if attempt == max_retries - 1:
-                         raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
-                     # Clear proxies and retry
-                     self._client.session.proxies = {}
-                     time.sleep(1) # Wait before retry
-
-             # Process the response
-             full_content = ""
-             if response.status_code == 200:
-                 # Check for Content-Encoding header
-                 content_encoding = response.headers.get('Content-Encoding')
-
-                 # Decompress the response if needed
-                 try:
-                     response_text = decompress_response(response.content, content_encoding)
-                 except IOError as e:
-                     # If decompression fails, fall back to the original method
-                     print(f"Warning: {e}. Falling back to original decoding method.")
-                     decoder = codecs.getincrementaldecoder("utf-8")("replace")
-                     response_text = decoder.decode(response.content, final=True)
-
-                 # Handle possible SSE format in response
-                 if "data: " in response_text:
-                     # Extract content from SSE format
-                     content_lines = []
-                     for line in response_text.split('\n'):
-                         if line.startswith("data: "):
-                             line = line[6:].strip()
-                             if line and not any(error_msg in line.lower() for error_msg in [
-                                 "service has been suspended",
-                                 "api request failed",
-                                 "you have reached your request limit"
-                             ]):
-                                 content_lines.append(line)
-                     full_content = "".join(content_lines)
-                 else:
-                     # Regular response
-                     full_content = response_text
-             else:
-                 # Handle error response
-                 raise IOError(f"BlackboxAI request failed with status code {response.status_code}")
-
-             # Clean and create the completion message
-             cleaned_content = clean_text(full_content)
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=cleaned_content
-             )
-
-             # Create the choice with the message
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Estimate token usage using count_tokens
-             prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
-             completion_tokens = count_tokens(cleaned_content)
-
-             # Create the final completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=CompletionUsage(
-                     prompt_tokens=prompt_tokens,
-                     completion_tokens=completion_tokens,
-                     total_tokens=prompt_tokens + completion_tokens
-                 )
-             )
-
-             return completion
-
-         except Exception as e:
-             raise IOError(f"BlackboxAI request failed: {str(e)}") from e
-         finally:
-             # Restore original proxies
-             self._client.session.proxies = original_proxies
-
-     def _create_streaming(
-         self,
-         *,
-         request_id: str,
-         created_time: int,
-         model: str,
-         messages: List[Dict[str, Any]],
-         system_message: str,
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         media: List = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None
-     ):
-         """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
-         original_proxies = self._client.session.proxies.copy()
-         # Only use proxies if they are explicitly provided and not causing issues
-         if proxies is not None and proxies:
-             self._client.session.proxies.update(proxies)
-         else:
-             # Clear proxies to avoid connection issues
-             self._client.session.proxies = {}
-         try:
-             # Prepare user messages for BlackboxAI API format
-             blackbox_messages = []
-             for i, msg in enumerate(messages):
-                 if msg["role"] == "system":
-                     continue # System message handled separately
-                 msg_id = self._client.generate_id() if i > 0 else request_id
-                 blackbox_messages.append({
-                     "id": msg_id,
-                     "content": msg["content"],
-                     "role": msg["role"]
-                 })
-             # Add image data if provided
-             if media and blackbox_messages:
-                 blackbox_messages[-1]['data'] = {
-                     "imagesData": [
-                         {
-                             "filePath": f"/",
-                             "contents": to_data_uri(image[0])
-                         } for image in media
-                     ],
-                     "fileText": "",
-                     "title": ""
-                 }
-             # Generate request payload with session
-             request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
-             session_data = self._client.generate_session(request_email)
-             payload = self._client.create_request_payload(
-                 messages=blackbox_messages,
-                 chat_id=request_id,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 session_data=session_data,
-                 model=model
-             )
-             # Make the API request with cookies, stream=True and retry logic
-             max_retries = 3
-             for attempt in range(max_retries):
-                 try:
-                     response = self._client.session.post(
-                         self._client.api_endpoint,
-                         json=payload,
-                         headers=self._client.headers,
-                         cookies=self._client.cookies,
-                         stream=True,
-                         timeout=timeout if timeout is not None else self._client.timeout
-                     )
-                     break # Success, exit retry loop
-                 except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
-                     if attempt == max_retries - 1:
-                         raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
-                     # Clear proxies and retry
-                     self._client.session.proxies = {}
-                     time.sleep(1) # Wait before retry
-             # Blackbox streams as raw text, no line breaks, so chunk manually
-             chunk_size = 32 # Tune as needed for smoothness
-             # ChoiceDelta is already imported at the top of the file
-
-             # Check if the response is compressed and create appropriate decompressor
-             content_encoding = response.headers.get('Content-Encoding')
-             streaming_decompressor = StreamingDecompressor(content_encoding)
-
-             # Stream with real-time decompression
-             for chunk in response.iter_content(chunk_size=chunk_size):
-                 if not chunk:
-                     continue
-
-                 # Decompress chunk in real-time
-                 text = streaming_decompressor.decompress_chunk(chunk)
-
-                 if text:
-                     cleaned_chunk = clean_text(text)
-                     if cleaned_chunk.strip():
-                         delta = ChoiceDelta(content=cleaned_chunk, role="assistant")
-                         choice = Choice(index=0, delta=delta, finish_reason=None)
-                         chunk_obj = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=None
-                         )
-                         yield chunk_obj
-
-             # Finalize decompression and get any remaining text
-             final_text = streaming_decompressor.finalize()
-             if final_text.strip():
-                 cleaned_final = clean_text(final_text)
-                 delta = ChoiceDelta(content=cleaned_final, role="assistant")
-                 choice = Choice(index=0, delta=delta, finish_reason=None)
-                 chunk_obj = ChatCompletionChunk(
-                     id=request_id,
-                     choices=[choice],
-                     created=created_time,
-                     model=model,
-                     system_fingerprint=None
-                 )
-                 yield chunk_obj
-
-             # Send final chunk with finish_reason="stop"
-             delta = ChoiceDelta(content="", role="assistant")
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             final_chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-             yield final_chunk
-
-         except Exception as e:
-             # Handle errors gracefully by yielding an error chunk
-             error_delta = ChoiceDelta(content=f"Error: {str(e)}", role="assistant")
-             error_choice = Choice(index=0, delta=error_delta, finish_reason="stop")
-             error_chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[error_choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-             yield error_chunk
-         finally:
-             # Restore original proxies
-             self._client.session.proxies = original_proxies
-
-
- class Chat(BaseChat):
-     def __init__(self, client: 'BLACKBOXAI'):
-         self.completions = Completions(client)
-
-
- class BLACKBOXAI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for BlackboxAI API.
-
-     Usage:
-         client = BLACKBOXAI()
-         response = client.chat.completions.create(
-             model="GPT-4.1",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-     # Default model
-     default_model = "GPT-4.1"
-     default_vision_model = default_model
-     api_endpoint = "https://www.blackbox.ai/api/chat"
-     timeout = None
-
-     # New OpenRouter models list
-     openrouter_models = [
-         "Deepcoder 14B Preview",
-         "DeepHermes 3 Llama 3 8B Preview",
-         "DeepSeek R1 Zero",
-         "Dolphin3.0 Mistral 24B",
-         "Dolphin3.0 R1 Mistral 24B",
-         "Flash 3",
-         "Gemini 2.0 Flash Experimental",
-         "Gemma 2 9B",
-         "Gemma 3 12B",
-         "Gemma 3 1B",
-         "Gemma 3 27B",
-         "Gemma 3 4B",
-         "Kimi VL A3B Thinking",
-         "Llama 3.1 8B Instruct",
-         "Llama 3.1 Nemotron Ultra 253B v1",
-         "Llama 3.2 11B Vision Instruct",
-         "Llama 3.2 1B Instruct",
-         "Llama 3.2 3B Instruct",
-         "Llama 3.3 70B Instruct",
-         "Llama 3.3 Nemotron Super 49B v1",
-         "Llama 4 Maverick",
-         "Llama 4 Scout",
-         "Mistral 7B Instruct",
-         "Mistral Nemo",
-         "Mistral Small 3",
-         "Mistral Small 3.1 24B",
-         "Molmo 7B D",
-         "Moonlight 16B A3B Instruct",
-         "Qwen2.5 72B Instruct",
-         "Qwen2.5 7B Instruct",
-         "Qwen2.5 Coder 32B Instruct",
-         "Qwen2.5 VL 32B Instruct",
-         "Qwen2.5 VL 3B Instruct",
-         "Qwen2.5 VL 72B Instruct",
-         "Qwen2.5-VL 7B Instruct",
-         "Qwerky 72B",
-         "QwQ 32B",
-         "QwQ 32B Preview",
-         "QwQ 32B RpR v1",
-         "R1",
-         "R1 Distill Llama 70B",
-         "R1 Distill Qwen 14B",
-         "R1 Distill Qwen 32B",
-     ]
-
-     # New base models list
-     models = [
-         "gpt-4.1-mini", # Added new model
-         default_model,
-         "o3-mini",
-         "gpt-4.1-nano",
-         "Claude Opus 4", # Added Claude Opus 4
-         "Claude Sonnet 4", # Added Claude Sonnet 4
-         "Claude-sonnet-3.7",
-         "Claude-sonnet-3.5",
-         "Grok 3", # Added Grok 3
-         "Gemini 2.5 Pro", # Added Gemini 2.5 Pro
-         "UI-TARS 72B", # Added UI-TARS 72B
-         "DeepSeek-R1",
-         "Mistral-Small-24B-Instruct-2501",
-         *openrouter_models,
-         # Trending agent modes (names)
-         'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
-         'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
-         'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
-         'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
-         'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
-         'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
-     ]
-
-     # Models that support vision capabilities
-     vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct", "Gemini 2.5 Pro", "Claude Sonnet 4", "Claude Opus 4", "UI-TARS 72B"] # Added Llama vision, Gemini 2.5 Pro, Claude Sonnet 4, Claude Opus 4, and UI-TARS 72B
-
-     # Models that can be directly selected by users
-     userSelectedModel = ['o3-mini', 'Claude Opus 4', 'Claude Sonnet 4', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'Grok 3', 'Gemini 2.5 Pro', 'UI-TARS 72B', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
-
-     # Agent mode configurations
-     agentMode = {
-         # OpenRouter Free
-         'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
-         'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
-         'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
-         'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
-         'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
-         'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
-         'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
-         'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
-         'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
-         'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
-         'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
-         'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
-         'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
-         'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
-         'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
-         'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
-         'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
-         'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
-         'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
-         'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
-         'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
-         'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
-         'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
-         'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
-         'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"}, # Matches Mistral-Small-24B-Instruct-2501
-         'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
-         'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
-         'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
-         'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
-         'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
-         'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
-         'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
-         'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
-         'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
-         'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
-         'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
-         'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
-         'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
-         'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
-         'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"}, # Matches DeepSeek-R1
-         'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
-         'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
-         'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
-         # Default models from the new list
-         'Claude Opus 4': {'mode': True, 'id': "anthropic/claude-opus-4", 'name': "Claude Opus 4"},
-         'Claude Sonnet 4': {'mode': True, 'id': "anthropic/claude-sonnet-4", 'name': "Claude Sonnet 4"},
-         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
-         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
-         'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
-         'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
-         'UI-TARS 72B': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B"},
-         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}, # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
-         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
-         # Add default_model if it's not covered and has an agent mode
-         default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model}, # Assuming GPT-4.1 is agent-compatible
-         'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"}, # Assuming o3-mini is agent-compatible
-         'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"}, # Assuming gpt-4.1-nano is agent-compatible
-         'gpt-4.1-mini': {'mode': True, 'id': "gpt-4.1-mini", 'name': "gpt-4.1-mini"}, # Added agent mode for gpt-4.1-mini
-     }
-
-     # Trending agent modes
-     trendingAgentMode = {
-         'Python Agent': {'mode': True, 'id': "python"},
-         'HTML Agent': {'mode': True, 'id': "html"},
-         'Builder Agent': {'mode': True, 'id': "builder"},
-         'Java Agent': {'mode': True, 'id': "java"},
-         'JavaScript Agent': {'mode': True, 'id': "javascript"},
-         'React Agent': {'mode': True, 'id': "react"},
-         'Android Agent': {'mode': True, 'id': "android"},
-         'Flutter Agent': {'mode': True, 'id': "flutter"},
-         'Next.js Agent': {'mode': True, 'id': "next.js"},
-         'AngularJS Agent': {'mode': True, 'id': "angularjs"},
-         'Swift Agent': {'mode': True, 'id': "swift"},
-         'MongoDB Agent': {'mode': True, 'id': "mongodb"},
-         'PyTorch Agent': {'mode': True, 'id': "pytorch"},
-         'Xcode Agent': {'mode': True, 'id': "xcode"},
-         'Azure Agent': {'mode': True, 'id': "azure"},
-         'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
-         'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
-         'Docker Agent': {'mode': True, 'id': "docker"},
-         'Electron Agent': {'mode': True, 'id': "electron"},
-         'Erlang Agent': {'mode': True, 'id': "erlang"},
-         'FastAPI Agent': {'mode': True, 'id': "fastapi"},
-         'Firebase Agent': {'mode': True, 'id': "firebase"},
-         'Flask Agent': {'mode': True, 'id': "flask"},
-         'Git Agent': {'mode': True, 'id': "git"},
-         'Gitlab Agent': {'mode': True, 'id': "gitlab"},
-         'Go Agent': {'mode': True, 'id': "go"},
-         'Godot Agent': {'mode': True, 'id': "godot"},
-         'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
-         'Heroku Agent': {'mode': True, 'id': "heroku"},
-     }
-
-     # Create AVAILABLE_MODELS as a list with just the model aliases (no "BLACKBOXAI/" prefix)
-     AVAILABLE_MODELS = list(models)
-
-
-     def __init__(
-         self
-     ):
-         """
-         Initialize the BlackboxAI provider with OpenAI compatibility.
-         """
-         # Initialize session
-         self.session = requests.Session()
-         # Remove any proxy configuration to avoid connection issues
-         self.session.proxies = {}
-
-         # Set headers based on GitHub reference
-         self.headers = {
-             'Accept': 'text/event-stream',
-             'Accept-Encoding': 'gzip, deflate, br, zstd',
-             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
-             'Content-Type': 'application/json',
-             'DNT': '1',
-             'Origin': 'https://www.blackbox.ai',
-             'Referer': 'https://www.blackbox.ai/',
-             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             'Sec-CH-UA-Mobile': '?0',
-             'Sec-CH-UA-Platform': '"Windows"',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin',
-             'User-Agent': agent.random(),
-         }
-
-         # Set cookies for the session
-         self.cookies = {
-             'cfzs_amplitude': self.generate_id(32),
-             'cfz_amplitude': self.generate_id(32),
-             '__cf_bm': self.generate_id(32),
-         }
-
-         # Initialize chat interface with completions
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
-
-     @classmethod
-     def get_model(cls, model: str) -> str:
-         """Return the model name, removing BLACKBOXAI/ prefix if present, or default_model."""
-         if model.startswith("BLACKBOXAI/"):
-             model = model[len("BLACKBOXAI/"):]
-         if model in cls.AVAILABLE_MODELS:
-             return model
-         return cls.default_model
-
-     @classmethod
-     def generate_random_string(cls, length: int = 8) -> str:
-         """Generate a random string of specified length."""
-         chars = string.ascii_lowercase + string.digits
-         return ''.join(random.choice(chars) for _ in range(length))
-
-     @classmethod
-     def generate_id(cls, length: int = 7) -> str:
-         """Generate a random ID of specified length."""
-         chars = string.ascii_letters + string.digits
-         return ''.join(random.choice(chars) for _ in range(length))
-
-     @classmethod
-     def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
-         """
-         Generate a dynamic session with proper ID and expiry format using a specific email.
-
-         Args:
-             email: The email to use for this session
-             id_length: Length of the numeric ID (default: 21)
-             days_ahead: Number of days ahead for expiry (default: 30)
-
-         Returns:
-             dict: A session dictionary with user information and expiry
-         """
-         # Generate a random name
-         first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
-         last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
-         name = f"{random.choice(first_names)} {random.choice(last_names)}"
-
-         # Generate numeric ID - using Google-like ID format
-         numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
-
-         # Generate future expiry date
-         future_date = datetime.now() + timedelta(days=days_ahead)
-         expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
-
-         # Generate random image ID for the new URL format
-         chars = string.ascii_letters + string.digits + "-"
-         random_img_id = ''.join(random.choice(chars) for _ in range(48))
-         image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
-
-         return {
-             "user": {
-                 "name": name,
-                 "email": email,
-                 "image": image_url,
-                 "id": numeric_id
-             },
-             "expires": expiry,
-             "isNewUser": False
-         }
-
-     def create_request_payload(
-         self,
-         messages: List[Dict[str, Any]],
-         chat_id: str,
-         system_message: str,
-         max_tokens: int,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         session_data: Dict[str, Any] = None,
-         model: str = None
-     ) -> Dict[str, Any]:
-         """Create the full request payload for the BlackboxAI API."""
-         # Get the correct model ID and agent mode
-         model_name = self.get_model(model or self.default_model)
-         agent_mode = self.agentMode.get(model_name, {})
-
-         # Generate a random customer ID for the subscription
-         customer_id = "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14))
-
-         # Create the full request payload
-         return {
-             "messages": messages,
-             "agentMode": agent_mode,
-             "id": chat_id,
-             "previewToken": None,
-             "userId": None,
-             "codeModelMode": True,
-             "trendingAgentMode": {},
-             "isMicMode": False,
-             "userSystemPrompt": system_message,
-             "maxTokens": max_tokens,
-             "playgroundTopP": top_p,
-             "playgroundTemperature": temperature,
-             "isChromeExt": False,
-             "githubToken": "",
-             "clickedAnswer2": False,
-             "clickedAnswer3": False,
-             "clickedForceWebSearch": False,
-             "visitFromDelta": False,
-             "isMemoryEnabled": False,
-             "mobileClient": False,
-             "userSelectedModel": model_name if model_name in self.userSelectedModel else None,
-             "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
-             "imageGenerationMode": False,
-             "webSearchModePrompt": False,
-             "deepSearchMode": False,
-             "designerMode": False,
-             "domains": None,
-             "vscodeClient": False,
-             "codeInterpreterMode": False,
-             "customProfile": {
-                 "name": "",
-                 "occupation": "",
-                 "traits": [],
-                 "additionalInfo": "",
-                 "enableNewChats": False
-             },
-             "webSearchModeOption": {
-                 "autoMode": True,
-                 "webMode": False,
-                 "offlineMode": False
-             },
-             "session": session_data,
-             "isPremium": True,
-             "subscriptionCache": {
-                 "status": "PREMIUM",
-                 "customerId": customer_id,
-                 "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
-                 "lastChecked": int(datetime.now().timestamp() * 1000),
-                 "isTrialSubscription": True
-             },
-             "beastMode": False,
-             "reasoningMode": False,
-             "designerMode": False,
-             "workspaceId": ""
-         }
- if __name__ == "__main__":
-     # Example usage
-     client = BLACKBOXAI()
-     response = client.chat.completions.create(
-         model="GPT-4.1",
-         messages=[{"role": "user", "content": "Tell me about india in points"}],
-         stream=True
-     )
-     for chunk in response:
-         print(chunk.choices[0].delta.content, end='', flush=True)
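
Most of the removed BLACKBOXAI.py above is streaming-decompression machinery (StreamingDecompressor and decompress_response). For readers auditing the deletion, here is a minimal self-contained sketch of the gzip technique it used: incremental zlib decompression (wbits = 16 + zlib.MAX_WBITS selects the gzip container, as in the removed __init__) paired with an incremental UTF-8 decoder. Standard library only; illustrative, not a drop-in replacement:

import codecs
import gzip
import zlib

def stream_decode_gzip(chunks):
    # Mirrors the removed gzip path: partial compressed bytes in,
    # partial decoded text out, without buffering the whole response.
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)  # expect a gzip header
    decoder = codecs.getincrementaldecoder("utf-8")("replace")
    for chunk in chunks:
        yield decoder.decode(decompressor.decompress(chunk), final=False)
    # Flush whatever the decompressor still holds, then finalize the decoder
    yield decoder.decode(decompressor.flush(), final=True)

payload = gzip.compress("héllo, streaming world".encode("utf-8"))
# Five-byte pieces stand in for response.iter_content(chunk_size=...) chunks
pieces = [payload[i:i + 5] for i in range(0, len(payload), 5)]
print("".join(stream_decode_gzip(pieces)))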