webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,20 +1,198 @@
+# from pickle import NONE
+import requests
 import requests
 import random
 import string
 import base64
 from datetime import datetime, timedelta
-from typing import List, Dict, Optional, Any
-import json
+from typing import Generator, List, Dict, Optional, Any, Union
 import uuid
 import time
+import codecs
+import gzip
+import zstandard as zstd
+import brotli
+import zlib
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion, Choice,
-    ChatCompletionMessage, CompletionUsage
+    ChatCompletionMessage, CompletionUsage, count_tokens,
+    ChatCompletionChunk  # Added for the streaming return type
 )
+from webscout.litagent import LitAgent
+agent = LitAgent()
 
+class StreamingDecompressor:
+    """
+    A streaming decompressor that can handle partial compressed data in real time.
+    This allows true streaming decompression without buffering the entire response.
+    """
+    def __init__(self, content_encoding: str):
+        self.encoding = content_encoding.lower().strip() if content_encoding else None
+        self.decompressor = None
+        self.text_decoder = codecs.getincrementaldecoder("utf-8")("replace")
+        self.zstd_buffer = b""  # Buffer for incomplete zstd frames
+
+        if self.encoding == 'gzip':
+            self.decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)  # gzip format
+        elif self.encoding == 'deflate':
+            self.decompressor = zlib.decompressobj()  # deflate format
+        elif self.encoding == 'zstd':
+            self.decompressor = zstd.ZstdDecompressor()
+        elif self.encoding == 'br':
+            self.decompressor = brotli.Decompressor()
+
+    def decompress_chunk(self, chunk: bytes) -> str:
+        """
+        Decompress a chunk of data and return the decoded text.
+        Handles partial compressed data properly for real-time streaming.
+        """
+        try:
+            if not chunk:
+                return ""
+
+            if not self.encoding or self.encoding not in ['gzip', 'deflate', 'zstd', 'br']:
+                # No compression or unsupported encoding - decode directly
+                return self.text_decoder.decode(chunk, final=False)
+
+            if self.encoding in ['gzip', 'deflate']:
+                # Use the zlib decompressor for gzip/deflate
+                decompressed_data = self.decompressor.decompress(chunk)
+                return self.text_decoder.decode(decompressed_data, final=False)
+
+            elif self.encoding == 'zstd':
+                # Zstandard streaming decompression with buffering for incomplete frames
+                self.zstd_buffer += chunk
+                try:
+                    # Try to decompress the current buffer
+                    decompressed_data = self.decompressor.decompress(self.zstd_buffer)
+                    # If successful, clear the buffer and return the decoded text
+                    self.zstd_buffer = b""
+                    return self.text_decoder.decode(decompressed_data, final=False)
+                except zstd.ZstdError:
+                    # Frame is incomplete: keep buffering, but try to decompress
+                    # any complete frame from the start of the buffer
+                    try:
+                        buffer_len = len(self.zstd_buffer)
+                        if buffer_len > 4:  # Minimum zstd frame size
+                            # Try progressively longer prefixes of the buffer
+                            for end_pos in range(4, buffer_len + 1):
+                                try:
+                                    partial_data = self.decompressor.decompress(self.zstd_buffer[:end_pos])
+                                    # Found a complete frame
+                                    self.zstd_buffer = self.zstd_buffer[end_pos:]
+                                    return self.text_decoder.decode(partial_data, final=False)
+                                except zstd.ZstdError:
+                                    continue
+                    except Exception:
+                        pass
+                    return ""
+
+            elif self.encoding == 'br':
+                # Brotli streaming decompression
+                try:
+                    decompressed_data = self.decompressor.decompress(chunk)
+                    return self.text_decoder.decode(decompressed_data, final=False)
+                except brotli.error:
+                    # If brotli fails, it may need more data or be at the end
+                    return ""
+
+        except Exception:
+            # If decompression fails, try to decode the chunk as-is (fallback)
+            try:
+                return self.text_decoder.decode(chunk, final=False)
+            except UnicodeDecodeError:
+                return ""
+
+    def finalize(self) -> str:
+        """
+        Finalize the decompression and return any remaining decoded text.
+        """
+        try:
+            remaining_text = ""
+
+            if self.encoding in ['gzip', 'deflate'] and self.decompressor:
+                # Flush any remaining compressed data
+                remaining_data = self.decompressor.flush()
+                if remaining_data:
+                    remaining_text = self.text_decoder.decode(remaining_data, final=True)
+                else:
+                    remaining_text = self.text_decoder.decode(b"", final=True)
+            elif self.encoding == 'zstd':
+                # Process any remaining buffered data
+                if self.zstd_buffer:
+                    try:
+                        remaining_data = self.decompressor.decompress(self.zstd_buffer)
+                        remaining_text = self.text_decoder.decode(remaining_data, final=True)
+                    except Exception:
+                        # If the buffered data can't be decompressed, finalize the decoder
+                        remaining_text = self.text_decoder.decode(b"", final=True)
+                else:
+                    remaining_text = self.text_decoder.decode(b"", final=True)
+            else:
+                # Finalize the text decoder for other encodings
+                remaining_text = self.text_decoder.decode(b"", final=True)
+
+            return remaining_text
+        except Exception:
+            # Ensure we always finalize the text decoder
+            try:
+                return self.text_decoder.decode(b"", final=True)
+            except Exception:
+                return ""
+
+def decompress_response(response_content: bytes, content_encoding: str) -> str:
+    """
+    Decompress response content based on the Content-Encoding header.
+
+    Args:
+        response_content: The raw response content as bytes
+        content_encoding: The Content-Encoding header value
+
+    Returns:
+        str: The decompressed content decoded as a UTF-8 string
+
+    Raises:
+        IOError: If decompression fails
+    """
+    try:
+        if not content_encoding:
+            # No compression, decode directly
+            return response_content.decode('utf-8')
+
+        encoding = content_encoding.lower().strip()
+
+        if encoding == 'zstd':
+            # Decompress using zstandard
+            dctx = zstd.ZstdDecompressor()
+            decompressed_data = dctx.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'gzip':
+            # Decompress using gzip
+            decompressed_data = gzip.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'br':
+            # Decompress using brotli
+            decompressed_data = brotli.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        elif encoding == 'deflate':
+            # Decompress using zlib (deflate, already imported at module level)
+            decompressed_data = zlib.decompress(response_content)
+            return decompressed_data.decode('utf-8')
+
+        else:
+            # Unknown or unsupported encoding, try to decode as-is
+            return response_content.decode('utf-8')
+
+    except Exception as e:
+        raise IOError(f"Failed to decompress response with encoding '{content_encoding}': {str(e)}") from e
 
 def to_data_uri(image_data):
     """Convert image data to a data URI format"""
@@ -36,6 +214,19 @@ def to_data_uri(image_data):
 
     return f"data:{mime_type};base64,{encoded}"
 
+def clean_text(text):
+    """Clean text by removing null bytes and control characters except newlines and tabs."""
+    import re
+    if not isinstance(text, str):
+        return text
+
+    # Remove null bytes
+    text = text.replace('\x00', '')
+
+    # Keep newlines, tabs, and other printable characters; remove other control chars
+    # (this regex matches control characters except \n, \r, and \t)
+    return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
+
 
 class Completions(BaseCompletions):
     def __init__(self, client: 'BLACKBOXAI'):
@@ -50,8 +241,10 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
-    ) -> ChatCompletion:
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Create a chat completion with BlackboxAI API.
 
@@ -59,18 +252,14 @@
             model: The model to use (from AVAILABLE_MODELS)
             messages: List of message dictionaries with 'role' and 'content'
             max_tokens: Maximum number of tokens to generate
-            stream: If True, raises an error as streaming is not supported
+            stream: If True, yields streaming chunks
             temperature: Sampling temperature (0-1)
             top_p: Nucleus sampling parameter (0-1)
             **kwargs: Additional parameters to pass to the API
 
         Returns:
-            Returns a ChatCompletion object
+            Returns a ChatCompletion object or a generator for streaming
         """
-        # Check if streaming is requested and raise an error
-        if stream:
-            raise ValueError("Streaming is not supported by the BLACKBOXAI provider. Please use stream=False.")
-
         # Generate request ID and timestamp
         request_id = str(uuid.uuid4())
         created_time = int(time.time())
@@ -108,6 +297,22 @@
         except Exception as e:
             pass
 
+        # If streaming is requested, dispatch to the streaming implementation
+        if stream:
+            return self._create_streaming(
+                request_id=request_id,
+                created_time=created_time,
+                model=model,
+                messages=messages,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+                media=media,
+                timeout=timeout,
+                proxies=proxies
+            )
+
         # Use non-streaming implementation
         return self._create_non_streaming(
             request_id=request_id,
@@ -118,11 +323,12 @@
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
-            media=media
+            media=media,
+            timeout=timeout,
+            proxies=proxies
         )
 
 
-
     def _create_non_streaming(
         self,
         *,
@@ -134,9 +340,14 @@
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
-        media: List = None
+        media: List = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
         try:
             # Prepare user messages for BlackboxAI API format
             blackbox_messages = []
@@ -157,7 +368,7 @@
                     "imagesData": [
                         {
                             "filePath": f"/",
-                            "contents": to_data_uri(image)
+                            "contents": to_data_uri(image[0])
                         } for image in media
                     ],
                     "fileText": "",
@@ -186,14 +397,23 @@
                 json=payload,
                 headers=self._client.headers,
                 cookies=self._client.cookies,
-                timeout=self._client.timeout
+                timeout=timeout if timeout is not None else self._client.timeout
             )
 
             # Process the response
             full_content = ""
             if response.status_code == 200:
-                # Extract content from response text
-                response_text = response.text
+                # Check for the Content-Encoding header
+                content_encoding = response.headers.get('Content-Encoding')
+
+                # Decompress the response if needed
+                try:
+                    response_text = decompress_response(response.content, content_encoding)
+                except IOError as e:
+                    # If decompression fails, fall back to the original method
+                    print(f"Warning: {e}. Falling back to original decoding method.")
+                    decoder = codecs.getincrementaldecoder("utf-8")("replace")
+                    response_text = decoder.decode(response.content, final=True)
 
                 # Handle possible SSE format in response
                 if "data: " in response_text:
@@ -216,10 +436,11 @@
                 # Handle error response
                 raise IOError(f"BlackboxAI request failed with status code {response.status_code}")
 
-            # Create the completion message
+            # Clean and create the completion message
+            cleaned_content = clean_text(full_content)
             message = ChatCompletionMessage(
                 role="assistant",
-                content=full_content
+                content=cleaned_content
            )
 
             # Create the choice with the message
@@ -229,9 +450,9 @@
                 finish_reason="stop"
             )
 
-            # Estimate token usage
-            prompt_tokens = sum(len(str(msg.get("content", ""))) // 4 for msg in messages)
-            completion_tokens = len(full_content) // 4
+            # Estimate token usage using count_tokens
+            prompt_tokens = count_tokens([str(msg.get("content", "")) for msg in messages])
+            completion_tokens = count_tokens(cleaned_content)
 
             # Create the final completion object
             completion = ChatCompletion(
@@ -250,6 +471,148 @@
 
         except Exception as e:
             raise IOError(f"BlackboxAI request failed: {str(e)}") from e
+        finally:
+            if proxies is not None:
+                self._client.session.proxies = original_proxies
+
+    def _create_streaming(
+        self,
+        *,
+        request_id: str,
+        created_time: int,
+        model: str,
+        messages: List[Dict[str, Any]],
+        system_message: str,
+        max_tokens: Optional[int] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        media: List = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None
+    ):
+        """Implementation for streaming chat completions (OpenAI-compatible chunks)."""
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        try:
+            # Prepare user messages for BlackboxAI API format
+            blackbox_messages = []
+            for i, msg in enumerate(messages):
+                if msg["role"] == "system":
+                    continue  # System message handled separately
+                msg_id = self._client.generate_id() if i > 0 else request_id
+                blackbox_messages.append({
+                    "id": msg_id,
+                    "content": msg["content"],
+                    "role": msg["role"]
+                })
+            # Add image data if provided
+            if media and blackbox_messages:
+                blackbox_messages[-1]['data'] = {
+                    "imagesData": [
+                        {
+                            "filePath": f"/",
+                            "contents": to_data_uri(image[0])
+                        } for image in media
+                    ],
+                    "fileText": "",
+                    "title": ""
+                }
+            # Generate request payload with session
+            request_email = f"{self._client.generate_random_string(8)}@blackbox.ai"
+            session_data = self._client.generate_session(request_email)
+            payload = self._client.create_request_payload(
+                messages=blackbox_messages,
+                chat_id=request_id,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+                session_data=session_data,
+                model=model
+            )
+            # Make the API request with cookies, stream=True
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                json=payload,
+                headers=self._client.headers,
+                cookies=self._client.cookies,
+                stream=True,
+                timeout=timeout if timeout is not None else self._client.timeout
+            )
+            # Blackbox streams as raw text with no line breaks, so chunk manually
+            import codecs
+            chunk_size = 32  # Tune as needed for smoothness
+            from webscout.Provider.OPENAI.utils import ChatCompletionChunk, Choice, ChoiceDelta
+
+            # Check whether the response is compressed and create the appropriate decompressor
+            content_encoding = response.headers.get('Content-Encoding')
+            streaming_decompressor = StreamingDecompressor(content_encoding)
+
+            # Stream with real-time decompression
+            for chunk in response.iter_content(chunk_size=chunk_size):
+                if not chunk:
+                    continue
+
+                # Decompress the chunk in real time
+                text = streaming_decompressor.decompress_chunk(chunk)
+
+                if text:
+                    cleaned_chunk = clean_text(text)
+                    if cleaned_chunk.strip():
+                        delta = ChoiceDelta(content=cleaned_chunk, role="assistant")
+                        choice = Choice(index=0, delta=delta, finish_reason=None)
+                        chunk_obj = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+                        yield chunk_obj
+
+            # Finalize decompression and emit any remaining text
+            final_text = streaming_decompressor.finalize()
+            if final_text.strip():
+                cleaned_final = clean_text(final_text)
+                delta = ChoiceDelta(content=cleaned_final, role="assistant")
+                choice = Choice(index=0, delta=delta, finish_reason=None)
+                chunk_obj = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+                yield chunk_obj
+
+            # Send the final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content="", role="assistant")
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            yield final_chunk
+
+        except Exception as e:
+            # Handle errors gracefully by yielding an error chunk
+            error_delta = ChoiceDelta(content=f"Error: {str(e)}", role="assistant")
+            error_choice = Choice(index=0, delta=error_delta, finish_reason="stop")
+            error_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[error_choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            yield error_chunk
+        finally:
+            if proxies is not None:
+                self._client.session.proxies = original_proxies
 
 
 class Chat(BaseChat):
@@ -273,7 +636,7 @@ class BLACKBOXAI(OpenAICompatibleProvider):
     default_model = "GPT-4.1"
     default_vision_model = default_model
     api_endpoint = "https://www.blackbox.ai/api/chat"
-    timeout = 30
+    timeout = None
 
 
     # Default model (remains the same as per original class)
@@ -329,11 +692,17 @@ class BLACKBOXAI(OpenAICompatibleProvider):
 
     # New base models list
     models = [
+        "gpt-4.1-mini",  # Added new model
         default_model,
         "o3-mini",
         "gpt-4.1-nano",
+        "Claude Opus 4",  # Added Claude Opus 4
+        "Claude Sonnet 4",  # Added Claude Sonnet 4
         "Claude-sonnet-3.7",
         "Claude-sonnet-3.5",
+        "Grok 3",  # Added Grok 3
+        "Gemini 2.5 Pro",  # Added Gemini 2.5 Pro
+        "UI-TARS 72B",  # Added UI-TARS 72B
         "DeepSeek-R1",
         "Mistral-Small-24B-Instruct-2501",
         *openrouter_models,
@@ -347,10 +716,10 @@ class BLACKBOXAI(OpenAICompatibleProvider):
     ]
 
     # Models that support vision capabilities
-    vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct"]  # Added Llama vision
+    vision_models = [default_vision_model, 'o3-mini', "Llama 3.2 11B Vision Instruct", "Gemini 2.5 Pro", "Claude Sonnet 4", "Claude Opus 4", "UI-TARS 72B"]  # Added Llama vision, Gemini 2.5 Pro, Claude Sonnet 4, Claude Opus 4, and UI-TARS 72B
 
     # Models that can be directly selected by users
-    userSelectedModel = ['o3-mini', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
+    userSelectedModel = ['o3-mini', 'Claude Opus 4', 'Claude Sonnet 4', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'Grok 3', 'Gemini 2.5 Pro', 'UI-TARS 72B', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
 
     # Agent mode configurations
     agentMode = {
@@ -399,14 +768,20 @@ class BLACKBOXAI(OpenAICompatibleProvider):
         'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
         'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
         # Default models from the new list
+        'Claude Opus 4': {'mode': True, 'id': "anthropic/claude-opus-4", 'name': "Claude Opus 4"},
+        'Claude Sonnet 4': {'mode': True, 'id': "anthropic/claude-sonnet-4", 'name': "Claude Sonnet 4"},
         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
+        'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
+        'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
+        'UI-TARS 72B': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B"},
         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},  # This is 'R1' in openrouter, but 'DeepSeek-R1' in base models
         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
         # Add default_model if it's not covered and has an agent mode
         default_model: {'mode': True, 'id': "openai/gpt-4.1", 'name': default_model},  # Assuming GPT-4.1 is agent-compatible
         'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"},  # Assuming o3-mini is agent-compatible
         'gpt-4.1-nano': {'mode': True, 'id': "gpt-4.1-nano", 'name': "gpt-4.1-nano"},  # Assuming gpt-4.1-nano is agent-compatible
+        'gpt-4.1-mini': {'mode': True, 'id': "gpt-4.1-mini", 'name': "gpt-4.1-mini"},  # Added agent mode for gpt-4.1-mini
     }
 
     # Trending agent modes
@@ -442,101 +817,19 @@ class BLACKBOXAI(OpenAICompatibleProvider):
         'Heroku Agent': {'mode': True, 'id': "heroku"},
     }
 
-    # Create a list of all model aliases
-    _all_model_aliases = list(dict.fromkeys([
-        # Add all model aliases
-        "gpt-4", "gpt-4.1", "gpt-4o", "gpt-4o-mini",
-        "claude-3.7-sonnet", "claude-3.5-sonnet",
-        "deepcoder-14b", "deephermes-3-8b", "deepseek-r1-zero", "deepseek-r1",
-        "dolphin-3.0-24b", "dolphin-3.0-r1-24b", "reka-flash", "gemini-2.0-flash",
-        "gemma-2-9b", "gemma-3-12b", "gemma-3-1b", "gemma-3-27b", "gemma-3-4b",
-        "kimi-vl-a3b-thinking", "llama-3.1-8b", "nemotron-253b", "llama-3.2-11b",
-        "llama-3.2-1b", "llama-3.2-3b", "llama-3.3-70b", "nemotron-49b",
-        "llama-4-maverick", "llama-4-scout", "mistral-7b", "mistral-nemo",
-        "mistral-small-24b", "mistral-small-24b-instruct-2501", "mistral-small-3.1-24b",
-        "molmo-7b", "moonlight-16b", "qwen-2.5-72b", "qwen-2.5-7b", "qwen-2.5-coder-32b",
-        "qwen-2.5-vl-32b", "qwen-2.5-vl-3b", "qwen-2.5-vl-72b", "qwen-2.5-vl-7b",
-        "qwerky-72b", "qwq-32b", "qwq-32b-preview", "qwq-32b-arliai",
-        "deepseek-r1-distill-llama-70b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b",
-        # Add base models
-        "o3-mini", "gpt-4.1-nano"
-    ]))
-
-    # Create AVAILABLE_MODELS as a list with the format "BLACKBOXAI/model"
-    AVAILABLE_MODELS = [f"BLACKBOXAI/{name}" for name in _all_model_aliases]
-
-    # Create a mapping dictionary for internal use
-    _model_mapping = {name: f"BLACKBOXAI/{name}" for name in _all_model_aliases}
-
-
-    # Model aliases for easier reference
-    model_aliases = {
-        "gpt-4": default_model,  # default_model is "GPT-4.1"
-        "gpt-4.1": default_model,
-        "gpt-4o": default_model,  # Defaulting to GPT-4.1 as per previous logic if specific GPT-4o handling isn't defined elsewhere
-        "gpt-4o-mini": default_model,  # Defaulting
-        "claude-3.7-sonnet": "Claude-sonnet-3.7",
-        "claude-3.5-sonnet": "Claude-sonnet-3.5",
-        # "deepseek-r1": "DeepSeek-R1",  # This is in base models, maps to R1 or DeepSeek R1 Zero in agentMode
-        #
-        "deepcoder-14b": "Deepcoder 14B Preview",
-        "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
-        "deepseek-r1-zero": "DeepSeek R1 Zero",
-        "deepseek-r1": "R1",  # Alias for R1 (which is deepseek/deepseek-r1:free)
-        "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
-        "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
-        "reka-flash": "Flash 3",
-        "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
-        "gemma-2-9b": "Gemma 2 9B",
-        "gemma-3-12b": "Gemma 3 12B",
-        "gemma-3-1b": "Gemma 3 1B",
-        "gemma-3-27b": "Gemma 3 27B",
-        "gemma-3-4b": "Gemma 3 4B",
-        "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
-        "llama-3.1-8b": "Llama 3.1 8B Instruct",
-        "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
-        "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
-        "llama-3.2-1b": "Llama 3.2 1B Instruct",
-        "llama-3.2-3b": "Llama 3.2 3B Instruct",
-        "llama-3.3-70b": "Llama 3.3 70B Instruct",
-        "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
-        "llama-4-maverick": "Llama 4 Maverick",
-        "llama-4-scout": "Llama 4 Scout",
-        "mistral-7b": "Mistral 7B Instruct",
-        "mistral-nemo": "Mistral Nemo",
-        "mistral-small-24b": "Mistral Small 3",  # Alias for "Mistral Small 3"
-        "mistral-small-24b-instruct-2501": "Mistral-Small-24B-Instruct-2501",  # Specific name
-        "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
-        "molmo-7b": "Molmo 7B D",
-        "moonlight-16b": "Moonlight 16B A3B Instruct",
-        "qwen-2.5-72b": "Qwen2.5 72B Instruct",
-        "qwen-2.5-7b": "Qwen2.5 7B Instruct",
-        "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
-        "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
-        "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
-        "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
-        "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
-        "qwerky-72b": "Qwerky 72B",
-        "qwq-32b": "QwQ 32B",
-        "qwq-32b-preview": "QwQ 32B Preview",
-        "qwq-32b-arliai": "QwQ 32B RpR v1",
-        "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
-        "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
-        "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
-    }
+    # Create AVAILABLE_MODELS as a list with just the model aliases (no "BLACKBOXAI/" prefix)
+    AVAILABLE_MODELS = list(models)
+
 
     def __init__(
-        self,
-        proxies: dict = {}
+        self
     ):
         """
         Initialize the BlackboxAI provider with OpenAI compatibility.
-
-        Args:
-            proxies: Optional proxy configuration
         """
         # Initialize session
         self.session = requests.Session()
+        self.session.proxies = {}
 
         # Set headers based on GitHub reference
         self.headers = {
@@ -553,7 +846,7 @@ class BLACKBOXAI(OpenAICompatibleProvider):
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+            'User-Agent': agent.random(),
         }
 
         # Set cookies for the session
@@ -563,9 +856,6 @@ class BLACKBOXAI(OpenAICompatibleProvider):
             '__cf_bm': self.generate_id(32),
         }
 
-        # Set proxies if provided
-        self.session.proxies = proxies
-
         # Initialize chat interface with completions
         self.chat = Chat(self)
 
@@ -579,29 +869,11 @@ class BLACKBOXAI(OpenAICompatibleProvider):
 
     @classmethod
     def get_model(cls, model: str) -> str:
-        """Resolve model name from alias."""
-        # Remove BLACKBOXAI/ prefix if present
+        """Return the model name, stripping the BLACKBOXAI/ prefix if present; unknown names fall back to default_model."""
         if model.startswith("BLACKBOXAI/"):
             model = model[len("BLACKBOXAI/"):]
-
-        # Convert to lowercase for case-insensitive matching
-        model_lower = model.lower()
-
-        # Check aliases (case-insensitive)
-        for alias, target in cls.model_aliases.items():
-            if model_lower == alias.lower():
-                return target
-
-        # If the model is directly in available models (without the prefix), return it
-        for available_model in cls._all_model_aliases:
-            if model_lower == available_model.lower():
-                # Find the corresponding model in model_aliases or use the model directly
-                for alias, target in cls.model_aliases.items():
-                    if available_model.lower() == alias.lower():
-                        return target
-                return available_model
-
-        # If we get here, use the default model
+        if model in cls.AVAILABLE_MODELS:
+            return model
         return cls.default_model
 
     @classmethod
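With the alias table removed, get_model above reduces to prefix stripping plus a membership check against AVAILABLE_MODELS; anything unrecognized now silently falls back to the default. A few illustrative calls, assuming the class as patched in this diff:

    BLACKBOXAI.get_model("BLACKBOXAI/o3-mini")  # -> "o3-mini"
    BLACKBOXAI.get_model("DeepSeek-R1")         # -> "DeepSeek-R1"
    BLACKBOXAI.get_model("claude-3.5-sonnet")   # -> "GPT-4.1" (old aliases no longer resolve)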
@@ -733,3 +1005,13 @@ class BLACKBOXAI(OpenAICompatibleProvider):
             "designerMode": False,
             "workspaceId": ""
         }
+if __name__ == "__main__":
+    # Example usage
+    client = BLACKBOXAI()
+    response = client.chat.completions.create(
+        model="GPT-4.1",
+        messages=[{"role": "user", "content": "Tell me about india in points"}],
+        stream=True
+    )
+    for chunk in response:
+        print(chunk.choices[0].delta.content, end='', flush=True)
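Beyond the streaming example bundled at the end of the file, create() now also accepts per-request timeout and proxies, which are applied to the session for the duration of the call and restored in a finally block. A non-streaming usage sketch, assuming the module path implied by the file list above (the proxy URL is a placeholder, not part of the release):

    from webscout.Provider.OPENAI.BLACKBOXAI import BLACKBOXAI

    client = BLACKBOXAI()
    response = client.chat.completions.create(
        model="GPT-4.1",
        messages=[{"role": "user", "content": "Hello"}],
        stream=False,
        timeout=60,                                  # per-request timeout, overrides the class default
        proxies={"https": "http://127.0.0.1:8080"},  # placeholder proxy, restored after the call
    )
    print(response.choices[0].message.content)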