webscout-8.3-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (62)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  4. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  5. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  6. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  7. webscout/Litlogger/formats.py +9 -0
  8. webscout/Litlogger/handlers.py +18 -0
  9. webscout/Litlogger/logger.py +43 -1
  10. webscout/Provider/AISEARCH/scira_search.py +3 -2
  11. webscout/Provider/LambdaChat.py +7 -1
  12. webscout/Provider/OPENAI/BLACKBOXAI.py +1049 -1017
  13. webscout/Provider/OPENAI/Qwen3.py +303 -303
  14. webscout/Provider/OPENAI/README.md +3 -0
  15. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  16. webscout/Provider/OPENAI/__init__.py +2 -1
  17. webscout/Provider/OPENAI/api.py +298 -13
  18. webscout/Provider/OPENAI/autoproxy.py +39 -0
  19. webscout/Provider/OPENAI/base.py +89 -12
  20. webscout/Provider/OPENAI/chatgpt.py +15 -2
  21. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  22. webscout/Provider/OPENAI/deepinfra.py +339 -328
  23. webscout/Provider/OPENAI/e2b.py +295 -73
  24. webscout/Provider/OPENAI/opkfc.py +18 -6
  25. webscout/Provider/OPENAI/scirachat.py +3 -2
  26. webscout/Provider/OPENAI/toolbaz.py +0 -1
  27. webscout/Provider/OPENAI/writecream.py +166 -166
  28. webscout/Provider/OPENAI/x0gpt.py +367 -367
  29. webscout/Provider/OPENAI/yep.py +383 -383
  30. webscout/Provider/STT/__init__.py +3 -0
  31. webscout/Provider/STT/base.py +281 -0
  32. webscout/Provider/STT/elevenlabs.py +265 -0
  33. webscout/Provider/TTI/__init__.py +3 -1
  34. webscout/Provider/TTI/aiarta.py +399 -365
  35. webscout/Provider/TTI/base.py +74 -2
  36. webscout/Provider/TTI/fastflux.py +63 -30
  37. webscout/Provider/TTI/gpt1image.py +149 -0
  38. webscout/Provider/TTI/imagen.py +196 -0
  39. webscout/Provider/TTI/magicstudio.py +60 -29
  40. webscout/Provider/TTI/piclumen.py +43 -32
  41. webscout/Provider/TTI/pixelmuse.py +232 -225
  42. webscout/Provider/TTI/pollinations.py +43 -32
  43. webscout/Provider/TTI/together.py +287 -0
  44. webscout/Provider/TTI/utils.py +2 -1
  45. webscout/Provider/TTS/README.md +1 -0
  46. webscout/Provider/TTS/__init__.py +2 -1
  47. webscout/Provider/TTS/freetts.py +140 -0
  48. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  49. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  50. webscout/Provider/__init__.py +3 -0
  51. webscout/Provider/scira_chat.py +3 -2
  52. webscout/Provider/toolbaz.py +0 -1
  53. webscout/litagent/Readme.md +12 -3
  54. webscout/litagent/agent.py +99 -62
  55. webscout/version.py +1 -1
  56. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/METADATA +1 -1
  57. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/RECORD +61 -51
  58. webscout/Provider/TTI/artbit.py +0 -0
  59. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/WHEEL +0 -0
  60. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
  61. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
  62. {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/BLACKBOXAI.py
@@ -1,1017 +1,1049 @@
- # from pickle import NONE
- import requests
- import requests
- import random
- import string
- import base64
- from datetime import datetime, timedelta
- from typing import Generator, List, Dict, Optional, Any, Union
- import uuid
- import time
- import codecs
- import gzip
- import zstandard as zstd
- import brotli
- import zlib
-
- # Import base classes and utility structures
- from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from webscout.Provider.OPENAI.utils import (
-     ChatCompletion, Choice,
-     ChatCompletionMessage, CompletionUsage, count_tokens,
-     ChatCompletionChunk # Added for streaming return type
- )
- from webscout.litagent import LitAgent
- agent = LitAgent()
-
- class StreamingDecompressor:
-     """
-     A streaming decompressor that can handle partial compressed data in real-time.
-     This allows for true streaming decompression without buffering entire response.
-     """
-     def __init__(self, content_encoding: str):
-         self.encoding = content_encoding.lower().strip() if content_encoding else None
-         self.decompressor = None
-         self.text_decoder = codecs.getincrementaldecoder("utf-8")("replace")
-         self.zstd_buffer = b"" # Buffer for zstd incomplete frames
-
-         if self.encoding == 'gzip':
-             self.decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS) # gzip format
-         elif self.encoding == 'deflate':
-             self.decompressor = zlib.decompressobj() # deflate format
-         elif self.encoding == 'zstd':
-             self.decompressor = zstd.ZstdDecompressor()
-         elif self.encoding == 'br':
-             self.decompressor = brotli.Decompressor()
-
-     def decompress_chunk(self, chunk: bytes) -> str:
-         """
-         Decompress a chunk of data and return decoded text.
-         Handles partial compressed data properly for real-time streaming.
-         """
-         try:
-             if not chunk:
-                 return ""
-
-             if not self.encoding or self.encoding not in ['gzip', 'deflate', 'zstd', 'br']:
-                 # No compression or unsupported - decode directly
-                 return self.text_decoder.decode(chunk, final=False)
-
-             if self.encoding in ['gzip', 'deflate']:
-                 # Use zlib decompressor for gzip/deflate
-                 decompressed_data = self.decompressor.decompress(chunk)
-                 return self.text_decoder.decode(decompressed_data, final=False)
-
-             elif self.encoding == 'zstd':
-                 # Zstandard streaming decompression with buffering for incomplete frames
-                 self.zstd_buffer += chunk
-                 try:
-                     # Try to decompress the current buffer
-                     decompressed_data = self.decompressor.decompress(self.zstd_buffer)
-                     # If successful, clear the buffer and return decoded text
-                     self.zstd_buffer = b""
-                     return self.text_decoder.decode(decompressed_data, final=False)
-                 except zstd.ZstdError:
-                     # Frame is incomplete, keep buffering
-                     # Try to decompress any complete frames from buffer start
-                     try:
-                         # Process buffer in chunks to find complete frames
-                         buffer_len = len(self.zstd_buffer)
-                         if buffer_len > 4: # Minimum zstd frame size
-                             # Try smaller chunks of the buffer
-                             for end_pos in range(4, buffer_len + 1):
-                                 try:
-                                     partial_data = self.decompressor.decompress(self.zstd_buffer[:end_pos])
-                                     # If we got here, we found a complete frame
-                                     self.zstd_buffer = self.zstd_buffer[end_pos:]
-                                     return self.text_decoder.decode(partial_data, final=False)
-                                 except zstd.ZstdError:
-                                     continue
-                     except Exception:
-                         pass
-                     return ""
-
-             elif self.encoding == 'br':
-                 # Brotli streaming decompression
-                 try:
-                     decompressed_data = self.decompressor.decompress(chunk)
-                     return self.text_decoder.decode(decompressed_data, final=False)
-                 except brotli.error:
-                     # If brotli fails, it might need more data or be at end
-                     return ""
-
-         except Exception as e:
-             # If decompression fails, try to decode the chunk as-is (fallback)
-             try:
-                 return self.text_decoder.decode(chunk, final=False)
-             except UnicodeDecodeError:
-                 return ""
-
-     def finalize(self) -> str:
-         """
-         Finalize the decompression and return any remaining decoded text.
-         """
-         try:
-             remaining_text = ""
-
-             if self.encoding in ['gzip', 'deflate'] and self.decompressor:
-                 # Flush any remaining compressed data
-                 remaining_data = self.decompressor.flush()
-                 if remaining_data:
-                     remaining_text = self.text_decoder.decode(remaining_data, final=True)
-                 else:
-                     remaining_text = self.text_decoder.decode(b"", final=True)
-             elif self.encoding == 'zstd':
-                 # Process any remaining buffered data
-                 if self.zstd_buffer:
-                     try:
-                         remaining_data = self.decompressor.decompress(self.zstd_buffer)
-                         remaining_text = self.text_decoder.decode(remaining_data, final=True)
-                     except:
-                         # If buffered data can't be decompressed, finalize decoder
-                         remaining_text = self.text_decoder.decode(b"", final=True)
-                 else:
-                     remaining_text = self.text_decoder.decode(b"", final=True)
-             else:
-                 # Finalize the text decoder for other encodings
-                 remaining_text = self.text_decoder.decode(b"", final=True)
-
-             return remaining_text
-         except Exception:
-             # Ensure we always finalize the text decoder
-             try:
-                 return self.text_decoder.decode(b"", final=True)
-             except:
-                 return ""
-
- def decompress_response(response_content: bytes, content_encoding: str) -> str:
-     """
-     Decompress response content based on the Content-Encoding header.
-
-     Args:
-         response_content: The raw response content as bytes
-         content_encoding: The Content-Encoding header value
-
-     Returns:
-         str: The decompressed and decoded content as UTF-8 string
-
-     Raises:
-         IOError: If decompression fails
-     """
-     try:
-         if not content_encoding:
-             # No compression, decode directly
-             return response_content.decode('utf-8')
-
-         encoding = content_encoding.lower().strip()
-
-         if encoding == 'zstd':
-             # Decompress using zstandard
-             dctx = zstd.ZstdDecompressor()
-             decompressed_data = dctx.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'gzip':
-             # Decompress using gzip
-             decompressed_data = gzip.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'br':
-             # Decompress using brotli
-             decompressed_data = brotli.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         elif encoding == 'deflate':
-             # Decompress using zlib (deflate)
-             import zlib
-             decompressed_data = zlib.decompress(response_content)
-             return decompressed_data.decode('utf-8')
-
-         else:
-             # Unknown or unsupported encoding, try to decode as-is
-             return response_content.decode('utf-8')
-
-     except Exception as e:
-         raise IOError(f"Failed to decompress response with encoding '{content_encoding}': {str(e)}") from e
-
- def to_data_uri(image_data):
-     """Convert image data to a data URI format"""
-     if isinstance(image_data, str):
-         # Assume it's already a data URI
-         return image_data
-
-     # Encode binary data to base64
-     encoded = base64.b64encode(image_data).decode('utf-8')
-
-     # Determine MIME type (simplified)
-     mime_type = "image/jpeg" # Default
-     if image_data.startswith(b'\x89PNG'):
-         mime_type = "image/png"
-     elif image_data.startswith(b'\xff\xd8'):
-         mime_type = "image/jpeg"
-     elif image_data.startswith(b'GIF'):
-         mime_type = "image/gif"
-
-     return f"data:{mime_type};base64,{encoded}"
-
- def clean_text(text):
-     """Clean text by removing null bytes and control characters except newlines and tabs."""
-     import re
-     if not isinstance(text, str):
-         return text
-
-     # Remove null bytes
-     text = text.replace('\x00', '')
-
-     # Keep newlines, tabs, and other printable characters, remove other control chars
-     # This regex matches control characters except \n, \r, \t
-     return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
-
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'BLACKBOXAI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, Any]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Create a chat completion with BlackboxAI API.
-
-         Args:
-             model: The model to use (from AVAILABLE_MODELS)
-             messages: List of message dictionaries with 'role' and 'content'
-             max_tokens: Maximum number of tokens to generate
-             stream: If True, yields streaming chunks
-             temperature: Sampling temperature (0-1)
-             top_p: Nucleus sampling parameter (0-1)
-             **kwargs: Additional parameters to pass to the API
-
-         Returns:
-             Returns a ChatCompletion object or a generator for streaming
-         """
-         # Generate request ID and timestamp
-         request_id = str(uuid.uuid4())
-         created_time = int(time.time())
-
-         # Extract system message if present
-         system_message = "You are a helpful AI assistant."
-         for msg in messages:
-             if msg.get("role") == "system":
-                 system_message = msg.get("content")
-                 break
-
-         # Look for any image content
-         media = []
-         for msg in messages:
-             if msg.get("role") == "user":
-                 # Check for image attachments in content
-                 content = msg.get("content", [])
-                 if isinstance(content, list):
-                     for item in content:
-                         if isinstance(item, dict) and item.get("type") == "image_url":
-                             image_url = item.get("image_url", {})
-                             if isinstance(image_url, dict) and "url" in image_url:
-                                 url = image_url["url"]
-                                 if url.startswith("data:"):
-                                     # It's already a data URI
-                                     image_name = f"image_{len(media)}.png"
-                                     media.append((url, image_name))
-                                 else:
-                                     # Need to fetch and convert to data URI
-                                     try:
-                                         image_response = requests.get(url)
-                                         if image_response.ok:
-                                             image_name = f"image_{len(media)}.png"
-                                             media.append((image_response.content, image_name))
-                                     except Exception as e:
-                                         pass
-
-         # Check if streaming is requested and raise an error
-         if stream:
-             return self._create_streaming(
-                 request_id=request_id,
-                 created_time=created_time,
-                 model=model,
-                 messages=messages,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 media=media,
-                 timeout=timeout,
-                 proxies=proxies
-             )
-
-         # Use non-streaming implementation
-         return self._create_non_streaming(
-             request_id=request_id,
-             created_time=created_time,
-             model=model,
-             messages=messages,
-             system_message=system_message,
-             max_tokens=max_tokens,
-             temperature=temperature,
-             top_p=top_p,
-             media=media,
-             timeout=timeout,
-             proxies=proxies
-         )
-
-
-     def _create_non_streaming(
-         self,
-         *,
-         request_id: str,
-         created_time: int,
-         model: str,
-         messages: List[Dict[str, Any]],
-         system_message: str,
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         media: List = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None
-     ) -> ChatCompletion:
-         """Implementation for non-streaming chat completions."""
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-         try:
-             # Prepare user messages for BlackboxAI API format
-             blackbox_messages = []
-             for i, msg in enumerate(messages):
-                 if msg["role"] == "system":
-                     continue # System message handled separately
-
-                 msg_id = self._client.generate_id() if i > 0 else request_id
-                 blackbox_messages.append({
-                     "id": msg_id,
-                     "content": msg["content"],
-                     "role": msg["role"]
-                 })
-
-             # Add image data if provided
-             if media and blackbox_messages:
-                 blackbox_messages[-1]['data'] = {
-                     "imagesData": [
-                         {
-                             "filePath": f"/",
-                             "contents": to_data_uri(image[0])
-                         } for image in media
-                     ],
-                     "fileText": "",
-                     "title": ""
-                 }
-
-             # Generate request payload with session
-             request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
-             session_data = self._client.generate_session(request_email)
-
-             # Create the API request payload
-             payload = self._client.create_request_payload(
-                 messages=blackbox_messages,
-                 chat_id=request_id,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 session_data=session_data,
-                 model=model
-             )
-
-             # Make the API request with cookies
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 json=payload,
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 timeout=timeout if timeout is not None else self._client.timeout
-             )
-
-             # Process the response
-             full_content = ""
-             if response.status_code == 200:
-                 # Check for Content-Encoding header
-                 content_encoding = response.headers.get('Content-Encoding')
-
-                 # Decompress the response if needed
-                 try:
-                     response_text = decompress_response(response.content, content_encoding)
-                 except IOError as e:
-                     # If decompression fails, fall back to the original method
-                     print(f"Warning: {e}. Falling back to original decoding method.")
-                     decoder = codecs.getincrementaldecoder("utf-8")("replace")
-                     response_text = decoder.decode(response.content, final=True)
-
-                 # Handle possible SSE format in response
-                 if "data: " in response_text:
-                     # Extract content from SSE format
-                     content_lines = []
-                     for line in response_text.split('\n'):
-                         if line.startswith("data: "):
-                             line = line[6:].strip()
-                             if line and not any(error_msg in line.lower() for error_msg in [
-                                 "service has been suspended",
-                                 "api request failed",
-                                 "you have reached your request limit"
-                             ]):
-                                 content_lines.append(line)
-                     full_content = "".join(content_lines)
-                 else:
-                     # Regular response
-                     full_content = response_text
-             else:
-                 # Handle error response
-                 raise IOError(f"BlackboxAI request failed with status code {response.status_code}")
-
-             # Clean and create the completion message
-             cleaned_content = clean_text(full_content)
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=cleaned_content
-             )
-
-             # Create the choice with the message
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Estimate token usage using count_tokens
-             prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
-             completion_tokens = count_tokens(cleaned_content)
-
-             # Create the final completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=CompletionUsage(
-                     prompt_tokens=prompt_tokens,
-                     completion_tokens=completion_tokens,
-                     total_tokens=prompt_tokens + completion_tokens
-                 )
-             )
-
-             return completion
-
-         except Exception as e:
-             raise IOError(f"BlackboxAI request failed: {str(e)}") from e
-         finally:
-             if proxies is not None:
-                 self._client.session.proxies = original_proxies
-
-     def _create_streaming(
-         self,
-         *,
-         request_id: str,
-         created_time: int,
-         model: str,
-         messages: List[Dict[str, Any]],
-         system_message: str,
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         media: List = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None
-     ):
-         """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-         try:
-             # Prepare user messages for BlackboxAI API format
-             blackbox_messages = []
-             for i, msg in enumerate(messages):
-                 if msg["role"] == "system":
-                     continue # System message handled separately
-                 msg_id = self._client.generate_id() if i > 0 else request_id
-                 blackbox_messages.append({
-                     "id": msg_id,
-                     "content": msg["content"],
-                     "role": msg["role"]
-                 })
-             # Add image data if provided
-             if media and blackbox_messages:
-                 blackbox_messages[-1]['data'] = {
-                     "imagesData": [
-                         {
-                             "filePath": f"/",
-                             "contents": to_data_uri(image[0])
-                         } for image in media
-                     ],
-                     "fileText": "",
-                     "title": ""
-                 }
-             # Generate request payload with session
-             request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
-             session_data = self._client.generate_session(request_email)
-             payload = self._client.create_request_payload(
-                 messages=blackbox_messages,
-                 chat_id=request_id,
-                 system_message=system_message,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 session_data=session_data,
-                 model=model
-             )
-             # Make the API request with cookies, stream=True
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 json=payload,
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 stream=True,
-                 timeout=timeout if timeout is not None else self._client.timeout
-             )
-             # Blackbox streams as raw text, no line breaks, so chunk manually
-             import codecs
-             chunk_size = 32 # Tune as needed for smoothness
-             from webscout.Provider.OPENAI.utils import ChatCompletionChunk, Choice, ChoiceDelta
-
-             # Check if the response is compressed and create appropriate decompressor
-             content_encoding = response.headers.get('Content-Encoding')
-             streaming_decompressor = StreamingDecompressor(content_encoding)
-
-             # Stream with real-time decompression
-             for chunk in response.iter_content(chunk_size=chunk_size):
-                 if not chunk:
-                     continue
-
-                 # Decompress chunk in real-time
-                 text = streaming_decompressor.decompress_chunk(chunk)
-
-                 if text:
-                     cleaned_chunk = clean_text(text)
-                     if cleaned_chunk.strip():
-                         delta = ChoiceDelta(content=cleaned_chunk, role="assistant")
-                         choice = Choice(index=0, delta=delta, finish_reason=None)
-                         chunk_obj = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=None
-                         )
-                         yield chunk_obj
-
-             # Finalize decompression and get any remaining text
-             final_text = streaming_decompressor.finalize()
-             if final_text.strip():
-                 cleaned_final = clean_text(final_text)
-                 delta = ChoiceDelta(content=cleaned_final, role="assistant")
-                 choice = Choice(index=0, delta=delta, finish_reason=None)
-                 chunk_obj = ChatCompletionChunk(
-                     id=request_id,
-                     choices=[choice],
-                     created=created_time,
-                     model=model,
-                     system_fingerprint=None
-                 )
-                 yield chunk_obj
-
-             # Send final chunk with finish_reason="stop"
-             delta = ChoiceDelta(content="", role="assistant")
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             final_chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-             yield final_chunk
-
-         except Exception as e:
-             # Handle errors gracefully by yielding an error chunk
-             error_delta = ChoiceDelta(content=f"Error: {str(e)}", role="assistant")
-             error_choice = Choice(index=0, delta=error_delta, finish_reason="stop")
-             error_chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[error_choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-             yield error_chunk
-         finally:
-             if proxies is not None:
-                 self._client.session.proxies = original_proxies
-
-
- class Chat(BaseChat):
-     def __init__(self, client: 'BLACKBOXAI'):
-         self.completions = Completions(client)
-
-
- class BLACKBOXAI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for BlackboxAI API.
-
-     Usage:
-         client = BLACKBOXAI()
-         response = client.chat.completions.create(
-             model="GPT-4.1",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-     # Default model
-     default_model = "GPT-4.1"
-     default_vision_model = default_model
-     api_endpoint = "https://www.blackbox.ai/api/chat"
-     timeout = None
-
-
-     # Default model (remains the same as per original class)
-     default_model = "GPT-4.1"
-     default_vision_model = default_model
-
-     # New OpenRouter models list
-     openrouter_models = [
-         "Deepcoder 14B Preview",
-         "DeepHermes 3 Llama 3 8B Preview",
-         "DeepSeek R1 Zero",
-         "Dolphin3.0 Mistral 24B",
-         "Dolphin3.0 R1 Mistral 24B",
-         "Flash 3",
-         "Gemini 2.0 Flash Experimental",
-         "Gemma 2 9B",
-         "Gemma 3 12B",
-         "Gemma 3 1B",
-         "Gemma 3 27B",
-         "Gemma 3 4B",
-         "Kimi VL A3B Thinking",
-         "Llama 3.1 8B Instruct",
-         "Llama 3.1 Nemotron Ultra 253B v1",
-         "Llama 3.2 11B Vision Instruct",
-         "Llama 3.2 1B Instruct",
-         "Llama 3.2 3B Instruct",
-         "Llama 3.3 70B Instruct",
-         "Llama 3.3 Nemotron Super 49B v1",
-         "Llama 4 Maverick",
-         "Llama 4 Scout",
-         "Mistral 7B Instruct",
-         "Mistral Nemo",
-         "Mistral Small 3",
-         "Mistral Small 3.1 24B",
-         "Molmo 7B D",
-         "Moonlight 16B A3B Instruct",
-         "Qwen2.5 72B Instruct",
-         "Qwen2.5 7B Instruct",
-         "Qwen2.5 Coder 32B Instruct",
-         "Qwen2.5 VL 32B Instruct",
-         "Qwen2.5 VL 3B Instruct",
-         "Qwen2.5 VL 72B Instruct",
-         "Qwen2.5-VL 7B Instruct",
-         "Qwerky 72B",
-         "QwQ 32B",
-         "QwQ 32B Preview",
-         "QwQ 32B RpR v1",
-         "R1",
-         "R1 Distill Llama 70B",
-         "R1 Distill Qwen 14B",
-         "R1 Distill Qwen 32B",
-     ]
-
-     # New base models list
-     models = [
-         "gpt-4.1-mini", # Added new model
-         default_model,
-         "o3-mini",
-         "gpt-4.1-nano",
-         "Claude Opus 4", # Added Claude Opus 4
-         "Claude Sonnet 4", # Added Claude Sonnet 4
-         "Claude-sonnet-3.7",
-         "Claude-sonnet-3.5",
-         "Grok 3", # Added Grok 3
-         "Gemini 2.5 Pro", # Added Gemini 2.5 Pro
-         "UI-TARS 72B", # Added UI-TARS 72B
-         "DeepSeek-R1",
-         "Mistral-Small-24B-Instruct-2501",
-         *openrouter_models,
-         # Trending agent modes (names)
-         'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
-         'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
-         'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
-         'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
-         'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
-         'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
-     ]
-
-     # Models that support vision capabilities
-     vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct", "Gemini 2.5 Pro", "Claude Sonnet 4", "Claude Opus 4", "UI-TARS 72B"] # Added Llama vision, Gemini 2.5 Pro, Claude Sonnet 4, Claude Opus 4, and UI-TARS 72B
-
-     # Models that can be directly selected by users
-     userSelectedModel = ['o3-mini', 'Claude Opus 4', 'Claude Sonnet 4', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'Grok 3', 'Gemini 2.5 Pro', 'UI-TARS 72B', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
-
-     # Agent mode configurations
-     agentMode = {
-         # OpenRouter Free
-         'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
-         'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
-         'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
-         'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
-         'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
-         'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
-         'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
-         'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
-         'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
-         'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
-         'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
-         'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
-         'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
-         'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
-         'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
-         'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
-         'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
-         'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
-         'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
-         'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
-         'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
-         'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
-         'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
-         'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
-         'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"}, # Matches Mistral-Small-24B-Instruct-2501
-         'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
-         'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
-         'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
-         'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
-         'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
-         'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
-         'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
-         'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
-         'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
-         'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
-         'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
-         'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
-         'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
-         'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
-         'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"}, # Matches DeepSeek-R1
-         'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
-         'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
-         'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
-         # Default models from the new list
-         'Claude Opus 4': {'mode': True, 'id': "anthropic/claude-opus-4", 'name': "Claude Opus 4"},
-         'Claude Sonnet 4': {'mode': True, 'id': "anthropic/claude-sonnet-4", 'name': "Claude Sonnet 4"},
-         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
-         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
-         'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
-         'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
-         'UI-TARS 72B': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B"},
-         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}, # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
-         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
-         # Add default_model if it's not covered and has an agent mode
-         default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model}, # Assuming GPT-4.1 is agent-compatible
-         'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"}, # Assuming o3-mini is agent-compatible
-         'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"}, # Assuming gpt-4.1-nano is agent-compatible
-         'gpt-4.1-mini': {'mode': True, 'id': "gpt-4.1-mini", 'name': "gpt-4.1-mini"}, # Added agent mode for gpt-4.1-mini
-     }
-
-     # Trending agent modes
-     trendingAgentMode = {
-         'Python Agent': {'mode': True, 'id': "python"},
-         'HTML Agent': {'mode': True, 'id': "html"},
-         'Builder Agent': {'mode': True, 'id': "builder"},
-         'Java Agent': {'mode': True, 'id': "java"},
-         'JavaScript Agent': {'mode': True, 'id': "javascript"},
-         'React Agent': {'mode': True, 'id': "react"},
-         'Android Agent': {'mode': True, 'id': "android"},
-         'Flutter Agent': {'mode': True, 'id': "flutter"},
-         'Next.js Agent': {'mode': True, 'id': "next.js"},
-         'AngularJS Agent': {'mode': True, 'id': "angularjs"},
-         'Swift Agent': {'mode': True, 'id': "swift"},
-         'MongoDB Agent': {'mode': True, 'id': "mongodb"},
-         'PyTorch Agent': {'mode': True, 'id': "pytorch"},
-         'Xcode Agent': {'mode': True, 'id': "xcode"},
-         'Azure Agent': {'mode': True, 'id': "azure"},
-         'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
-         'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
-         'Docker Agent': {'mode': True, 'id': "docker"},
-         'Electron Agent': {'mode': True, 'id': "electron"},
-         'Erlang Agent': {'mode': True, 'id': "erlang"},
-         'FastAPI Agent': {'mode': True, 'id': "fastapi"},
-         'Firebase Agent': {'mode': True, 'id': "firebase"},
-         'Flask Agent': {'mode': True, 'id': "flask"},
-         'Git Agent': {'mode': True, 'id': "git"},
-         'Gitlab Agent': {'mode': True, 'id': "gitlab"},
-         'Go Agent': {'mode': True, 'id': "go"},
-         'Godot Agent': {'mode': True, 'id': "godot"},
-         'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
-         'Heroku Agent': {'mode': True, 'id': "heroku"},
-     }
-
-     # Create AVAILABLE_MODELS as a list with just the model aliases (no "BLACKBOXAI/" prefix)
-     AVAILABLE_MODELS = list(models)
-
-
-     def __init__(
-         self
-     ):
-         """
-         Initialize the BlackboxAI provider with OpenAI compatibility.
-         """
-         # Initialize session
-         self.session = requests.Session()
-         self.session.proxies = {}
-
-         # Set headers based on GitHub reference
-         self.headers = {
-             'Accept': 'text/event-stream',
-             'Accept-Encoding': 'gzip, deflate, br, zstd',
-             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
-             'Content-Type': 'application/json',
-             'DNT': '1',
-             'Origin': 'https://www.blackbox.ai',
-             'Referer': 'https://www.blackbox.ai/',
-             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             'Sec-CH-UA-Mobile': '?0',
-             'Sec-CH-UA-Platform': '"Windows"',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin',
-             'User-Agent': agent.random(),
-         }
-
-         # Set cookies for the session
-         self.cookies = {
-             'cfzs_amplitude': self.generate_id(32),
-             'cfz_amplitude': self.generate_id(32),
-             '__cf_bm': self.generate_id(32),
-         }
-
-         # Initialize chat interface with completions
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
-
-     @classmethod
-     def get_model(cls, model: str) -> str:
-         """Return the model name, removing BLACKBOXAI/ prefix if present, or default_model."""
-         if model.startswith("BLACKBOXAI/"):
-             model = model[len("BLACKBOXAI/"):]
-         if model in cls.AVAILABLE_MODELS:
-             return model
-         return cls.default_model
-
-     @classmethod
-     def generate_random_string(cls, length: int = 8) -> str:
-         """Generate a random string of specified length."""
-         chars = string.ascii_lowercase + string.digits
-         return ''.join(random.choice(chars) for _ in range(length))
-
-     @classmethod
-     def generate_id(cls, length: int = 7) -> str:
-         """Generate a random ID of specified length."""
-         chars = string.ascii_letters + string.digits
-         return ''.join(random.choice(chars) for _ in range(length))
-
-     @classmethod
-     def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
-         """
-         Generate a dynamic session with proper ID and expiry format using a specific email.
-
-         Args:
-             email: The email to use for this session
-             id_length: Length of the numeric ID (default: 21)
-             days_ahead: Number of days ahead for expiry (default: 30)
-
-         Returns:
-             dict: A session dictionary with user information and expiry
-         """
-         # Generate a random name
-         first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
-         last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
-         name = f"{random.choice(first_names)} {random.choice(last_names)}"
-
-         # Generate numeric ID - using Google-like ID format
-         numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
-
-         # Generate future expiry date
-         future_date = datetime.now() + timedelta(days=days_ahead)
-         expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
-
-         # Generate random image ID for the new URL format
-         chars = string.ascii_letters + string.digits + "-"
-         random_img_id = ''.join(random.choice(chars) for _ in range(48))
-         image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
-
-         return {
-             "user": {
-                 "name": name,
-                 "email": email,
-                 "image": image_url,
-                 "id": numeric_id
-             },
-             "expires": expiry,
-             "isNewUser": False
-         }
-
-     def create_request_payload(
-         self,
-         messages: List[Dict[str, Any]],
-         chat_id: str,
-         system_message: str,
-         max_tokens: int,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         session_data: Dict[str, Any] = None,
-         model: str = None
-     ) -> Dict[str, Any]:
-         """Create the full request payload for the BlackboxAI API."""
-         # Get the correct model ID and agent mode
-         model_name = self.get_model(model or self.default_model)
-         agent_mode = self.agentMode.get(model_name, {})
-
-         # Generate a random customer ID for the subscription
-         customer_id = "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14))
-
-         # Create the full request payload
-         return {
-             "messages": messages,
-             "agentMode": agent_mode,
-             "id": chat_id,
-             "previewToken": None,
-             "userId": None,
-             "codeModelMode": True,
-             "trendingAgentMode": {},
-             "isMicMode": False,
-             "userSystemPrompt": system_message,
-             "maxTokens": max_tokens,
-             "playgroundTopP": top_p,
-             "playgroundTemperature": temperature,
-             "isChromeExt": False,
-             "githubToken": "",
-             "clickedAnswer2": False,
-             "clickedAnswer3": False,
-             "clickedForceWebSearch": False,
-             "visitFromDelta": False,
-             "isMemoryEnabled": False,
-             "mobileClient": False,
-             "userSelectedModel": model_name if model_name in self.userSelectedModel else None,
-             "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
-             "imageGenerationMode": False,
-             "webSearchModePrompt": False,
-             "deepSearchMode": False,
-             "designerMode": False,
-             "domains": None,
-             "vscodeClient": False,
-             "codeInterpreterMode": False,
-             "customProfile": {
-                 "name": "",
-                 "occupation": "",
-                 "traits": [],
-                 "additionalInfo": "",
-                 "enableNewChats": False
-             },
-             "webSearchModeOption": {
-                 "autoMode": True,
-                 "webMode": False,
-                 "offlineMode": False
-             },
-             "session": session_data,
-             "isPremium": True,
-             "subscriptionCache": {
-                 "status": "PREMIUM",
-                 "customerId": customer_id,
-                 "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
-                 "lastChecked": int(datetime.now().timestamp() * 1000),
-                 "isTrialSubscription": True
-             },
-             "beastMode": False,
-             "reasoningMode": False,
-             "designerMode": False,
-             "workspaceId": ""
-         }
- if __name__ == "__main__":
-     # Example usage
-     client = BLACKBOXAI()
-     response = client.chat.completions.create(
-         model="GPT-4.1",
-         messages=[{"role": "user", "content": "Tell me about india in points"}],
-         stream=True
-     )
-     for chunk in response:
-         print(chunk.choices[0].delta.content, end='', flush=True)
1
+ # from pickle import NONE
2
+ import requests
3
+ import requests
4
+ import random
5
+ import string
6
+ import base64
7
+ from datetime import datetime, timedelta
8
+ from typing import Generator, List, Dict, Optional, Any, Union
9
+ import uuid
10
+ import time
11
+ import codecs
12
+ import gzip
13
+ import zstandard as zstd
14
+ import brotli
15
+ import zlib
16
+ from webscout.Provider.OPENAI.utils import (
17
+ ChatCompletion, Choice,
18
+ ChatCompletionMessage, CompletionUsage, count_tokens,
19
+ ChatCompletionChunk, ChoiceDelta # Ensure ChoiceDelta is always imported at the top
20
+ )
21
+ try:
22
+ from webscout.litagent import LitAgent
23
+ agent = LitAgent()
24
+ except ImportError:
25
+ print("Warning: LitAgent not available, using default user agent")
26
+ class MockAgent:
27
+ def random(self):
28
+ return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
29
+ agent = MockAgent()
30
+
31
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
32
+
33
+ class StreamingDecompressor:
34
+ """
35
+ A streaming decompressor that can handle partial compressed data in real-time.
36
+ This allows for true streaming decompression without buffering entire response.
37
+ """
38
+ def __init__(self, content_encoding: str):
39
+ self.encoding = content_encoding.lower().strip() if content_encoding else None
40
+ self.decompressor = None
41
+ self.text_decoder = codecs.getincrementaldecoder("utf-8")("replace")
42
+ self.zstd_buffer = b"" # Buffer for zstd incomplete frames
43
+
44
+ if self.encoding == 'gzip':
45
+ self.decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS) # gzip format
46
+ elif self.encoding == 'deflate':
47
+ self.decompressor = zlib.decompressobj() # deflate format
48
+ elif self.encoding == 'zstd':
49
+ self.decompressor = zstd.ZstdDecompressor()
50
+ elif self.encoding == 'br':
51
+ self.decompressor = brotli.Decompressor()
52
+
53
+ def decompress_chunk(self, chunk: bytes) -> str:
54
+ """
55
+ Decompress a chunk of data and return decoded text.
56
+ Handles partial compressed data properly for real-time streaming.
57
+ """
58
+ try:
59
+ if not chunk:
60
+ return ""
61
+
62
+ if not self.encoding or self.encoding not in ['gzip', 'deflate', 'zstd', 'br']:
63
+ # No compression or unsupported - decode directly
64
+ return self.text_decoder.decode(chunk, final=False)
65
+
66
+ if self.encoding in ['gzip', 'deflate']:
67
+ # Use zlib decompressor for gzip/deflate
68
+ decompressed_data = self.decompressor.decompress(chunk)
69
+ return self.text_decoder.decode(decompressed_data, final=False)
70
+
71
+ elif self.encoding == 'zstd':
72
+ # Zstandard streaming decompression with buffering for incomplete frames
73
+ self.zstd_buffer += chunk
74
+ try:
75
+ # Try to decompress the current buffer
76
+ decompressed_data = self.decompressor.decompress(self.zstd_buffer)
77
+ # If successful, clear the buffer and return decoded text
78
+ self.zstd_buffer = b""
79
+ return self.text_decoder.decode(decompressed_data, final=False)
80
+ except zstd.ZstdError:
81
+ # Frame is incomplete, keep buffering
82
+ # Try to decompress any complete frames from buffer start
83
+ try:
84
+ # Process buffer in chunks to find complete frames
85
+ buffer_len = len(self.zstd_buffer)
86
+ if buffer_len > 4: # Minimum zstd frame size
87
+ # Try smaller chunks of the buffer
88
+ for end_pos in range(4, buffer_len + 1):
89
+ try:
90
+ partial_data = self.decompressor.decompress(self.zstd_buffer[:end_pos])
91
+ # If we got here, we found a complete frame
92
+ self.zstd_buffer = self.zstd_buffer[end_pos:]
93
+ return self.text_decoder.decode(partial_data, final=False)
94
+ except zstd.ZstdError:
95
+ continue
96
+ except Exception:
97
+ pass
98
+ return ""
99
+
100
+ elif self.encoding == 'br':
101
+ # Brotli streaming decompression
102
+ try:
103
+ decompressed_data = self.decompressor.decompress(chunk)
104
+ return self.text_decoder.decode(decompressed_data, final=False)
105
+ except brotli.error:
106
+ # If brotli fails, it might need more data or be at end
107
+ return ""
108
+
109
+ except Exception as e:
110
+ # If decompression fails, try to decode the chunk as-is (fallback)
111
+ try:
112
+ return self.text_decoder.decode(chunk, final=False)
113
+ except UnicodeDecodeError:
114
+ return ""
115
+
116
+ def finalize(self) -> str:
117
+ """
118
+ Finalize the decompression and return any remaining decoded text.
119
+ """
120
+ try:
121
+ remaining_text = ""
122
+
123
+ if self.encoding in ['gzip', 'deflate'] and self.decompressor:
124
+ # Flush any remaining compressed data
125
+ remaining_data = self.decompressor.flush()
126
+ if remaining_data:
127
+ remaining_text = self.text_decoder.decode(remaining_data, final=True)
128
+ else:
129
+ remaining_text = self.text_decoder.decode(b"", final=True)
130
+ elif self.encoding == 'zstd':
131
+ # Process any remaining buffered data
132
+ if self.zstd_buffer:
133
+ try:
134
+ remaining_data = self.decompressor.decompress(self.zstd_buffer)
135
+ remaining_text = self.text_decoder.decode(remaining_data, final=True)
136
+ except:
137
+ # If buffered data can't be decompressed, finalize decoder
138
+ remaining_text = self.text_decoder.decode(b"", final=True)
139
+ else:
140
+ remaining_text = self.text_decoder.decode(b"", final=True)
141
+ else:
142
+ # Finalize the text decoder for other encodings
143
+ remaining_text = self.text_decoder.decode(b"", final=True)
144
+
145
+ return remaining_text
146
+ except Exception:
147
+ # Ensure we always finalize the text decoder
148
+ try:
149
+ return self.text_decoder.decode(b"", final=True)
150
+ except:
151
+ return ""
152
+
153
+ def decompress_response(response_content: bytes, content_encoding: str) -> str:
154
+ """
155
+ Decompress response content based on the Content-Encoding header.
156
+
157
+ Args:
158
+ response_content: The raw response content as bytes
159
+ content_encoding: The Content-Encoding header value
160
+
161
+ Returns:
162
+ str: The decompressed and decoded content as UTF-8 string
163
+
164
+ Raises:
165
+ IOError: If decompression fails
166
+ """
167
+ try:
168
+ if not content_encoding:
169
+ # No compression, decode directly
170
+ return response_content.decode('utf-8')
171
+
172
+ encoding = content_encoding.lower().strip()
173
+
174
+ if encoding == 'zstd':
175
+ # Decompress using zstandard
176
+ dctx = zstd.ZstdDecompressor()
177
+ decompressed_data = dctx.decompress(response_content)
178
+ return decompressed_data.decode('utf-8')
179
+
180
+ elif encoding == 'gzip':
181
+ # Decompress using gzip
182
+ decompressed_data = gzip.decompress(response_content)
183
+ return decompressed_data.decode('utf-8')
184
+
185
+ elif encoding == 'br':
186
+ # Decompress using brotli
187
+ decompressed_data = brotli.decompress(response_content)
188
+ return decompressed_data.decode('utf-8')
189
+
190
+ elif encoding == 'deflate':
191
+ # Decompress using zlib (deflate)
192
+ import zlib
193
+ decompressed_data = zlib.decompress(response_content)
194
+ return decompressed_data.decode('utf-8')
195
+
196
+ else:
197
+ # Unknown or unsupported encoding, try to decode as-is
198
+ return response_content.decode('utf-8')
199
+
200
+ except Exception as e:
201
+ raise IOError(f"Failed to decompress response with encoding '{content_encoding}': {str(e)}") from e
202
+
203
+ def to_data_uri(image_data):
204
+ """Convert image data to a data URI format"""
205
+ if isinstance(image_data, str):
206
+ # Assume it's already a data URI
207
+ return image_data
208
+
209
+ # Encode binary data to base64
210
+ encoded = base64.b64encode(image_data).decode('utf-8')
211
+
212
+ # Determine MIME type (simplified)
213
+ mime_type = "image/jpeg" # Default
214
+ if image_data.startswith(b'\x89PNG'):
215
+ mime_type = "image/png"
216
+ elif image_data.startswith(b'\xff\xd8'):
217
+ mime_type = "image/jpeg"
218
+ elif image_data.startswith(b'GIF'):
219
+ mime_type = "image/gif"
220
+
221
+ return f"data:{mime_type};base64,{encoded}"
222
+
223
+ def clean_text(text):
224
+ """Clean text by removing null bytes and control characters except newlines and tabs."""
225
+ import re
226
+ if not isinstance(text, str):
227
+ return text
228
+
229
+ # Remove null bytes
230
+ text = text.replace('\x00', '')
231
+
232
+ # Keep newlines, tabs, and other printable characters, remove other control chars
233
+ # This regex matches control characters except \n, \r, \t
234
+ return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
235
+
236
+
237
+ class Completions(BaseCompletions):
238
+ def __init__(self, client: 'BLACKBOXAI'):
239
+ self._client = client
240
+
241
+ def create(
242
+ self,
243
+ *,
244
+ model: str,
245
+ messages: List[Dict[str, Any]],
246
+ max_tokens: Optional[int] = None,
247
+ stream: bool = False,
248
+ temperature: Optional[float] = None,
249
+ top_p: Optional[float] = None,
250
+ timeout: Optional[int] = None,
251
+ proxies: Optional[dict] = None,
252
+ **kwargs: Any
253
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
254
+ """
255
+ Create a chat completion with BlackboxAI API.
256
+
257
+ Args:
258
+ model: The model to use (from AVAILABLE_MODELS)
259
+ messages: List of message dictionaries with 'role' and 'content'
260
+ max_tokens: Maximum number of tokens to generate
261
+ stream: If True, yields streaming chunks
262
+ temperature: Sampling temperature (0-1)
263
+ top_p: Nucleus sampling parameter (0-1)
264
+ **kwargs: Additional parameters to pass to the API
265
+
266
+ Returns:
267
+ Returns a ChatCompletion object or a generator for streaming
268
+ """
269
+ # Generate request ID and timestamp
270
+ request_id = str(uuid.uuid4())
271
+ created_time = int(time.time())
272
+
273
+ # Extract system message if present
274
+ system_message = "You are a helpful AI assistant."
275
+ for msg in messages:
276
+ if msg.get("role") == "system":
277
+ system_message = msg.get("content")
278
+ break
279
+
280
+ # Look for any image content
281
+ media = []
282
+ for msg in messages:
283
+ if msg.get("role") == "user":
284
+ # Check for image attachments in content
285
+ content = msg.get("content", [])
286
+ if isinstance(content, list):
287
+ for item in content:
288
+ if isinstance(item, dict) and item.get("type") == "image_url":
289
+ image_url = item.get("image_url", {})
290
+ if isinstance(image_url, dict) and "url" in image_url:
291
+ url = image_url["url"]
292
+ if url.startswith("data:"):
293
+ # It's already a data URI
294
+ image_name = f"image_{len(media)}.png"
295
+ media.append((url, image_name))
296
+ else:
297
+ # Need to fetch and convert to data URI
298
+ try:
299
+ image_response = requests.get(url)
300
+ if image_response.ok:
301
+ image_name = f"image_{len(media)}.png"
302
+ media.append((image_response.content, image_name))
303
+ except Exception as e:
304
+ pass
305
+
306
+ # Check if streaming is requested and raise an error
307
+ if stream:
308
+ return self._create_streaming(
309
+ request_id=request_id,
310
+ created_time=created_time,
311
+ model=model,
312
+ messages=messages,
313
+ system_message=system_message,
314
+ max_tokens=max_tokens,
315
+ temperature=temperature,
316
+ top_p=top_p,
317
+ media=media,
318
+ timeout=timeout,
319
+ proxies=proxies
320
+ )
321
+
322
+ # Use non-streaming implementation
323
+ return self._create_non_streaming(
324
+ request_id=request_id,
325
+ created_time=created_time,
326
+ model=model,
327
+ messages=messages,
328
+ system_message=system_message,
329
+ max_tokens=max_tokens,
330
+ temperature=temperature,
331
+ top_p=top_p,
332
+ media=media,
333
+ timeout=timeout,
334
+ proxies=proxies
335
+ )
336
+
337
+
338
+ def _create_non_streaming(
339
+ self,
340
+ *,
341
+ request_id: str,
342
+ created_time: int,
343
+ model: str,
344
+ messages: List[Dict[str, Any]],
345
+ system_message: str,
346
+ max_tokens: Optional[int] = None,
347
+ temperature: Optional[float] = None,
348
+ top_p: Optional[float] = None,
349
+ media: List = None,
350
+ timeout: Optional[int] = None,
351
+ proxies: Optional[dict] = None
352
+ ) -> ChatCompletion:
353
+ """Implementation for non-streaming chat completions."""
354
+ original_proxies = self._client.session.proxies.copy()
355
+ # Only use proxies if they are explicitly provided and not causing issues
356
+ if proxies is not None and proxies:
357
+ self._client.session.proxies.update(proxies)
358
+ else:
359
+ # Clear proxies to avoid connection issues
360
+ self._client.session.proxies = {}
361
+ try:
362
+ # Prepare user messages for BlackboxAI API format
363
+ blackbox_messages = []
364
+ for i, msg in enumerate(messages):
365
+ if msg["role"] == "system":
366
+ continue # System message handled separately
367
+
368
+ msg_id = self._client.generate_id() if i > 0 else request_id
369
+ blackbox_messages.append({
370
+ "id": msg_id,
371
+ "content": msg["content"],
372
+ "role": msg["role"]
373
+ })
374
+
375
+ # Add image data if provided
376
+ if media and blackbox_messages:
377
+ blackbox_messages[-1]['data'] = {
378
+ "imagesData": [
379
+ {
380
+ "filePath": f"/",
381
+ "contents": to_data_uri(image[0])
382
+ } for image in media
383
+ ],
384
+ "fileText": "",
385
+ "title": ""
386
+ }
387
+
+            # Generate request payload with session
+            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
+            session_data = self._client.generate_session(request_email)
+
+            # Create the API request payload
+            payload = self._client.create_request_payload(
+                messages=blackbox_messages,
+                chat_id=request_id,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+                session_data=session_data,
+                model=model
+            )
+
+            # Make the API request with cookies and retry logic
+            max_retries = 3
+            for attempt in range(max_retries):
+                try:
+                    response = self._client.session.post(
+                        self._client.api_endpoint,
+                        json=payload,
+                        headers=self._client.headers,
+                        cookies=self._client.cookies,
+                        timeout=timeout if timeout is not None else self._client.timeout
+                    )
+                    break  # Success, exit retry loop
+                except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
+                    if attempt == max_retries - 1:
+                        raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
+                    # Clear proxies and retry
+                    self._client.session.proxies = {}
+                    time.sleep(1)  # Wait before retry
+
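+            # Retry behaviour at a glance: up to max_retries attempts; proxies
+            # are cleared between attempts so a bad proxy cannot poison the
+            # remaining retries, and only connection/proxy errors are retried.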
+            # Process the response
+            full_content = ""
+            if response.status_code == 200:
+                # Check for Content-Encoding header
+                content_encoding = response.headers.get('Content-Encoding')
+
+                # Decompress the response if needed
+                try:
+                    response_text = decompress_response(response.content, content_encoding)
+                except IOError as e:
+                    # If decompression fails, fall back to incremental UTF-8 decoding
+                    print(f"Warning: {e}. Falling back to original decoding method.")
+                    decoder = codecs.getincrementaldecoder("utf-8")("replace")
+                    response_text = decoder.decode(response.content, final=True)
+
+                # Handle possible SSE format in the response
+                if "data: " in response_text:
+                    # Extract content from SSE format
+                    content_lines = []
+                    for line in response_text.split('\n'):
+                        if line.startswith("data: "):
+                            line = line[6:].strip()
+                            if line and not any(error_msg in line.lower() for error_msg in [
+                                "service has been suspended",
+                                "api request failed",
+                                "you have reached your request limit"
+                            ]):
+                                content_lines.append(line)
+                    full_content = "".join(content_lines)
+                else:
+                    # Regular response
+                    full_content = response_text
+            else:
+                # Handle error response
+                raise IOError(f"BlackboxAI request failed with status code {response.status_code}")
+
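+            # SSE parsing example (assumed payload): the two lines
+            #   data: Hello
+            #   data: world
+            # are stripped and joined into full_content == "Helloworld"; lines
+            # without a "data: " prefix and known error messages are dropped.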
+            # Clean and create the completion message
+            cleaned_content = clean_text(full_content)
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=cleaned_content
+            )
+
+            # Create the choice with the message
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Estimate token usage using count_tokens
+            prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
+            completion_tokens = count_tokens(cleaned_content)
+
+            # Create the final completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=CompletionUsage(
+                    prompt_tokens=prompt_tokens,
+                    completion_tokens=completion_tokens,
+                    total_tokens=prompt_tokens + completion_tokens
+                )
+            )
+
+            return completion
+
+        except Exception as e:
+            raise IOError(f"BlackboxAI request failed: {str(e)}") from e
+        finally:
+            # Restore original proxies
+            self._client.session.proxies = original_proxies
+
+
+    def _create_streaming(
+        self,
+        *,
+        request_id: str,
+        created_time: int,
+        model: str,
+        messages: List[Dict[str, Any]],
+        system_message: str,
+        max_tokens: Optional[int] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        media: Optional[List] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None
+    ):
+        """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
+        original_proxies = self._client.session.proxies.copy()
+        # Only use proxies if they are explicitly provided
+        if proxies:
+            self._client.session.proxies.update(proxies)
+        else:
+            # Clear proxies to avoid connection issues
+            self._client.session.proxies = {}
+        try:
+            # Prepare user messages for the BlackboxAI API format
+            blackbox_messages = []
+            for i, msg in enumerate(messages):
+                if msg["role"] == "system":
+                    continue  # System message handled separately
+                msg_id = self._client.generate_id() if i > 0 else request_id
+                blackbox_messages.append({
+                    "id": msg_id,
+                    "content": msg["content"],
+                    "role": msg["role"]
+                })
+            # Add image data if provided
+            if media and blackbox_messages:
+                blackbox_messages[-1]['data'] = {
+                    "imagesData": [
+                        {
+                            "filePath": f"/{image[1]}",  # use the collected image name as the file path
+                            "contents": to_data_uri(image[0])
+                        } for image in media
+                    ],
+                    "fileText": "",
+                    "title": ""
+                }
+            # Generate request payload with session
+            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
+            session_data = self._client.generate_session(request_email)
+            payload = self._client.create_request_payload(
+                messages=blackbox_messages,
+                chat_id=request_id,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+                session_data=session_data,
+                model=model
+            )
+            # Make the API request with cookies, stream=True and retry logic
+            max_retries = 3
+            for attempt in range(max_retries):
+                try:
+                    response = self._client.session.post(
+                        self._client.api_endpoint,
+                        json=payload,
+                        headers=self._client.headers,
+                        cookies=self._client.cookies,
+                        stream=True,
+                        timeout=timeout if timeout is not None else self._client.timeout
+                    )
+                    break  # Success, exit retry loop
+                except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError) as e:
+                    if attempt == max_retries - 1:
+                        raise IOError(f"BlackboxAI connection failed after {max_retries} attempts: {str(e)}") from e
+                    # Clear proxies and retry
+                    self._client.session.proxies = {}
+                    time.sleep(1)  # Wait before retry
+            # Blackbox streams raw text with no line breaks, so chunk manually
+            chunk_size = 32  # Tune as needed for smoothness
+            # ChoiceDelta is already imported at the top of the file
+
+            # Check if the response is compressed and create the matching decompressor
+            content_encoding = response.headers.get('Content-Encoding')
+            streaming_decompressor = StreamingDecompressor(content_encoding)
+
+            # Stream with real-time decompression
+            for chunk in response.iter_content(chunk_size=chunk_size):
+                if not chunk:
+                    continue
+
+                # Decompress the chunk in real time
+                text = streaming_decompressor.decompress_chunk(chunk)
+
+                if text:
+                    cleaned_chunk = clean_text(text)
+                    if cleaned_chunk.strip():
+                        delta = ChoiceDelta(content=cleaned_chunk, role="assistant")
+                        choice = Choice(index=0, delta=delta, finish_reason=None)
+                        chunk_obj = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+                        yield chunk_obj
+
+            # Finalize decompression and flush any remaining text
+            final_text = streaming_decompressor.finalize()
+            if final_text.strip():
+                cleaned_final = clean_text(final_text)
+                delta = ChoiceDelta(content=cleaned_final, role="assistant")
+                choice = Choice(index=0, delta=delta, finish_reason=None)
+                chunk_obj = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+                yield chunk_obj
+
+            # Send the final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content="", role="assistant")
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            yield final_chunk
+
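+            # Chunk flow at a glance: N content chunks (finish_reason=None) are
+            # yielded as text arrives, then one empty chunk with
+            # finish_reason="stop", matching the OpenAI streaming contract.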
+        except Exception as e:
+            # Handle errors gracefully by yielding an error chunk
+            error_delta = ChoiceDelta(content=f"Error: {str(e)}", role="assistant")
+            error_choice = Choice(index=0, delta=error_delta, finish_reason="stop")
+            error_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[error_choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            yield error_chunk
+        finally:
+            # Restore original proxies
+            self._client.session.proxies = original_proxies
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'BLACKBOXAI'):
+        self.completions = Completions(client)
+
+
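+# Wiring sketch: BLACKBOXAI() -> .chat (Chat) -> .completions (Completions)
+# mirrors the openai-python client surface, so
+# client.chat.completions.create(...) works as in the official SDK.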
+class BLACKBOXAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for the BlackboxAI API.
+
+    Usage:
+        client = BLACKBOXAI()
+        response = client.chat.completions.create(
+            model="GPT-4.1",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+    # Default model
+    default_model = "GPT-4.1"
+    default_vision_model = default_model
+    api_endpoint = "https://www.blackbox.ai/api/chat"
+    timeout = None
+
+    # OpenRouter models list
+    openrouter_models = [
+        "Deepcoder 14B Preview",
+        "DeepHermes 3 Llama 3 8B Preview",
+        "DeepSeek R1 Zero",
+        "Dolphin3.0 Mistral 24B",
+        "Dolphin3.0 R1 Mistral 24B",
+        "Flash 3",
+        "Gemini 2.0 Flash Experimental",
+        "Gemma 2 9B",
+        "Gemma 3 12B",
+        "Gemma 3 1B",
+        "Gemma 3 27B",
+        "Gemma 3 4B",
+        "Kimi VL A3B Thinking",
+        "Llama 3.1 8B Instruct",
+        "Llama 3.1 Nemotron Ultra 253B v1",
+        "Llama 3.2 11B Vision Instruct",
+        "Llama 3.2 1B Instruct",
+        "Llama 3.2 3B Instruct",
+        "Llama 3.3 70B Instruct",
+        "Llama 3.3 Nemotron Super 49B v1",
+        "Llama 4 Maverick",
+        "Llama 4 Scout",
+        "Mistral 7B Instruct",
+        "Mistral Nemo",
+        "Mistral Small 3",
+        "Mistral Small 3.1 24B",
+        "Molmo 7B D",
+        "Moonlight 16B A3B Instruct",
+        "Qwen2.5 72B Instruct",
+        "Qwen2.5 7B Instruct",
+        "Qwen2.5 Coder 32B Instruct",
+        "Qwen2.5 VL 32B Instruct",
+        "Qwen2.5 VL 3B Instruct",
+        "Qwen2.5 VL 72B Instruct",
+        "Qwen2.5-VL 7B Instruct",
+        "Qwerky 72B",
+        "QwQ 32B",
+        "QwQ 32B Preview",
+        "QwQ 32B RpR v1",
+        "R1",
+        "R1 Distill Llama 70B",
+        "R1 Distill Qwen 14B",
+        "R1 Distill Qwen 32B",
+    ]
+
+    # Base models list
+    models = [
+        "gpt-4.1-mini",
+        default_model,
+        "o3-mini",
+        "gpt-4.1-nano",
+        "Claude Opus 4",
+        "Claude Sonnet 4",
+        "Claude-sonnet-3.7",
+        "Claude-sonnet-3.5",
+        "Grok 3",
+        "Gemini 2.5 Pro",
+        "UI-TARS 72B",
+        "DeepSeek-R1",
+        "Mistral-Small-24B-Instruct-2501",
+        *openrouter_models,
+        # Trending agent modes (names)
+        'Python Agent', 'HTML Agent', 'Builder Agent', 'Java Agent', 'JavaScript Agent',
+        'React Agent', 'Android Agent', 'Flutter Agent', 'Next.js Agent', 'AngularJS Agent',
+        'Swift Agent', 'MongoDB Agent', 'PyTorch Agent', 'Xcode Agent', 'Azure Agent',
+        'Bitbucket Agent', 'DigitalOcean Agent', 'Docker Agent', 'Electron Agent',
+        'Erlang Agent', 'FastAPI Agent', 'Firebase Agent', 'Flask Agent', 'Git Agent',
+        'Gitlab Agent', 'Go Agent', 'Godot Agent', 'Google Cloud Agent', 'Heroku Agent'
+    ]
+
+    # Models that support vision capabilities
+    vision_models = [
+        default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct",
+        "Gemini 2.5 Pro", "Claude Sonnet 4", "Claude Opus 4", "UI-TARS 72B"
+    ]
+
+    # Models that can be directly selected by users
+    userSelectedModel = [
+        'o3-mini', 'Claude Opus 4', 'Claude Sonnet 4', 'Claude-sonnet-3.7',
+        'Claude-sonnet-3.5', 'Grok 3', 'Gemini 2.5 Pro', 'UI-TARS 72B',
+        'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'
+    ] + openrouter_models
+
+    # Agent mode configurations
+    agentMode = {
+        # OpenRouter free models
+        'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+        'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+        'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+        'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+        'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+        'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+        'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+        'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+        'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+        'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+        'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+        'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+        'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+        'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+        'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+        'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+        'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+        'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+        'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+        'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+        'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+        'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+        'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+        'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+        'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},  # Matches Mistral-Small-24B-Instruct-2501
+        'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+        'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+        'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+        'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+        'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+        'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+        'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+        'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+        'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+        'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+        'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+        'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+        'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+        'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+        'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
+        'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+        'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+        'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+        # Models from the base list
+        'Claude Opus 4': {'mode': True, 'id': "anthropic/claude-opus-4", 'name': "Claude Opus 4"},
+        'Claude Sonnet 4': {'mode': True, 'id': "anthropic/claude-sonnet-4", 'name': "Claude Sonnet 4"},
+        'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
+        'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
+        'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
+        'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
+        'UI-TARS 72B': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B"},
+        'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},  # 'R1' on OpenRouter, 'DeepSeek-R1' in the base list
+        'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
+        # default_model and the plain OpenAI-style names, assumed to be agent-compatible
+        default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model},
+        'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"},
+        'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"},
+        'gpt-4.1-mini': {'mode': True, 'id': "gpt-4.1-mini", 'name': "gpt-4.1-mini"},
+    }
+
+    # Trending agent modes
+    trendingAgentMode = {
+        'Python Agent': {'mode': True, 'id': "python"},
+        'HTML Agent': {'mode': True, 'id': "html"},
+        'Builder Agent': {'mode': True, 'id': "builder"},
+        'Java Agent': {'mode': True, 'id': "java"},
+        'JavaScript Agent': {'mode': True, 'id': "javascript"},
+        'React Agent': {'mode': True, 'id': "react"},
+        'Android Agent': {'mode': True, 'id': "android"},
+        'Flutter Agent': {'mode': True, 'id': "flutter"},
+        'Next.js Agent': {'mode': True, 'id': "next.js"},
+        'AngularJS Agent': {'mode': True, 'id': "angularjs"},
+        'Swift Agent': {'mode': True, 'id': "swift"},
+        'MongoDB Agent': {'mode': True, 'id': "mongodb"},
+        'PyTorch Agent': {'mode': True, 'id': "pytorch"},
+        'Xcode Agent': {'mode': True, 'id': "xcode"},
+        'Azure Agent': {'mode': True, 'id': "azure"},
+        'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
+        'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
+        'Docker Agent': {'mode': True, 'id': "docker"},
+        'Electron Agent': {'mode': True, 'id': "electron"},
+        'Erlang Agent': {'mode': True, 'id': "erlang"},
+        'FastAPI Agent': {'mode': True, 'id': "fastapi"},
+        'Firebase Agent': {'mode': True, 'id': "firebase"},
+        'Flask Agent': {'mode': True, 'id': "flask"},
+        'Git Agent': {'mode': True, 'id': "git"},
+        'Gitlab Agent': {'mode': True, 'id': "gitlab"},
+        'Go Agent': {'mode': True, 'id': "go"},
+        'Godot Agent': {'mode': True, 'id': "godot"},
+        'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
+        'Heroku Agent': {'mode': True, 'id': "heroku"},
+    }
+
+    # AVAILABLE_MODELS is a snapshot of the model aliases (no "BLACKBOXAI/" prefix)
+    AVAILABLE_MODELS = list(models)
+
+    def __init__(self):
+        """
+        Initialize the BlackboxAI provider with OpenAI compatibility.
+        """
+        # Initialize session
+        self.session = requests.Session()
+        # Remove any proxy configuration to avoid connection issues
+        self.session.proxies = {}
+
+        # Set browser-like request headers
+        self.headers = {
+            'Accept': 'text/event-stream',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://www.blackbox.ai',
+            'Referer': 'https://www.blackbox.ai/',
+            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': agent.random(),
+        }
+
+        # Set cookies for the session
+        self.cookies = {
+            'cfzs_amplitude': self.generate_id(32),
+            'cfz_amplitude': self.generate_id(32),
+            '__cf_bm': self.generate_id(32),
+        }
+
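+        # Note (assumption): these cookie values are random placeholders; the
+        # endpoint appears to check only for their presence, not their validity.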
+        # Initialize chat interface with completions
+        self.chat = Chat(self)
+
+    # Note: this property shadows the class-level `models` list defined above;
+    # that list is preserved in AVAILABLE_MODELS, which is captured first.
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        """Return the model name, stripping the BLACKBOXAI/ prefix if present, or default_model."""
+        if model.startswith("BLACKBOXAI/"):
+            model = model[len("BLACKBOXAI/"):]
+        if model in cls.AVAILABLE_MODELS:
+            return model
+        return cls.default_model
+
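+    # Examples (illustrative): get_model("BLACKBOXAI/GPT-4.1") -> "GPT-4.1";
+    # get_model("not-a-known-model") -> "GPT-4.1" (the default_model fallback).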
+    @classmethod
+    def generate_random_string(cls, length: int = 8) -> str:
+        """Generate a random string of the specified length."""
+        chars = string.ascii_lowercase + string.digits
+        return ''.join(random.choice(chars) for _ in range(length))
+
+    @classmethod
+    def generate_id(cls, length: int = 7) -> str:
+        """Generate a random ID of the specified length."""
+        chars = string.ascii_letters + string.digits
+        return ''.join(random.choice(chars) for _ in range(length))
+
+    @classmethod
+    def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 30) -> dict:
+        """
+        Generate a dynamic session with a proper ID and expiry format for a given email.
+
+        Args:
+            email: The email to use for this session
+            id_length: Length of the numeric ID (default: 21)
+            days_ahead: Number of days ahead for expiry (default: 30)
+
+        Returns:
+            dict: A session dictionary with user information and expiry
+        """
+        # Generate a random name
+        first_names = ["Alex", "Jordan", "Taylor", "Morgan", "Casey", "Riley", "Avery", "Quinn", "Skyler", "Dakota"]
+        last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Miller", "Davis", "Garcia", "Rodriguez", "Wilson"]
+        name = f"{random.choice(first_names)} {random.choice(last_names)}"
+
+        # Generate a numeric, Google-like ID
+        numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
+
+        # Generate a future expiry date
+        future_date = datetime.now() + timedelta(days=days_ahead)
+        expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+
+        # Generate a random image ID for the avatar URL
+        chars = string.ascii_letters + string.digits + "-"
+        random_img_id = ''.join(random.choice(chars) for _ in range(48))
+        image_url = f"https://lh3.googleusercontent.com/a/ACg8oc{random_img_id}=s96-c"
+
+        return {
+            "user": {
+                "name": name,
+                "email": email,
+                "image": image_url,
+                "id": numeric_id
+            },
+            "expires": expiry,
+            "isNewUser": False
+        }
+
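+    # Sketch of a generated session (all values randomized at call time):
+    #   {"user": {"name": "Alex Smith", "email": "<caller-supplied>",
+    #             "image": "https://lh3.googleusercontent.com/a/ACg8oc...=s96-c",
+    #             "id": "<21 digits>"},
+    #    "expires": "<ISO-8601 timestamp ~30 days out>", "isNewUser": False}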
+    def create_request_payload(
+        self,
+        messages: List[Dict[str, Any]],
+        chat_id: str,
+        system_message: str,
+        max_tokens: Optional[int] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        session_data: Optional[Dict[str, Any]] = None,
+        model: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Create the full request payload for the BlackboxAI API."""
+        # Get the correct model ID and agent mode
+        model_name = self.get_model(model or self.default_model)
+        agent_mode = self.agentMode.get(model_name, {})
+
+        # Generate a random customer ID for the subscription
+        customer_id = "cus_" + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(14))
+
+        # Create the full request payload
+        return {
+            "messages": messages,
+            "agentMode": agent_mode,
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "trendingAgentMode": {},
+            "isMicMode": False,
+            "userSystemPrompt": system_message,
+            "maxTokens": max_tokens,
+            "playgroundTopP": top_p,
+            "playgroundTemperature": temperature,
+            "isChromeExt": False,
+            "githubToken": "",
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "isMemoryEnabled": False,
+            "mobileClient": False,
+            "userSelectedModel": model_name if model_name in self.userSelectedModel else None,
+            "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
+            "imageGenerationMode": False,
+            "webSearchModePrompt": False,
+            "deepSearchMode": False,
+            "designerMode": False,
+            "domains": None,
+            "vscodeClient": False,
+            "codeInterpreterMode": False,
+            "customProfile": {
+                "name": "",
+                "occupation": "",
+                "traits": [],
+                "additionalInfo": "",
+                "enableNewChats": False
+            },
+            "webSearchModeOption": {
+                "autoMode": True,
+                "webMode": False,
+                "offlineMode": False
+            },
+            "session": session_data,
+            "isPremium": True,
+            "subscriptionCache": {
+                "status": "PREMIUM",
+                "customerId": customer_id,
+                "expiryTimestamp": int((datetime.now() + timedelta(days=30)).timestamp()),
+                "lastChecked": int(datetime.now().timestamp() * 1000),
+                "isTrialSubscription": True
+            },
+            "beastMode": False,
+            "reasoningMode": False,
+            "workspaceId": ""
+        }
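+    # Payload notes: maxTokens/playgroundTemperature/playgroundTopP carry the
+    # OpenAI-style sampling parameters through, while session/subscriptionCache
+    # embed the randomly generated session so the request resembles a signed-in
+    # premium client. The "validated" UUID looks like a fixed anti-bot token
+    # (assumption; its exact role is not documented here).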
+
+
+if __name__ == "__main__":
+    # Example usage
+    client = BLACKBOXAI()
+    response = client.chat.completions.create(
+        model="GPT-4.1",
+        messages=[{"role": "user", "content": "Tell me about India in points"}],
+        stream=True
+    )
+    for chunk in response:
+        print(chunk.choices[0].delta.content, end='', flush=True)
+    print()
+    print("Proxies on instance:", client.proxies)
+    print("Proxies on session:", client.session.proxies)
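+    # Non-streaming variant (sketch, per the class docstring):
+    #   resp = client.chat.completions.create(
+    #       model="GPT-4.1",
+    #       messages=[{"role": "user", "content": "Hello!"}]
+    #   )
+    #   print(resp.choices[0].message.content)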