webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/llmchatco.py

@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None, # Note: LLMChatCo doesn't seem to use top_p directly in payload
         web_search: bool = False, # LLMChatCo specific parameter
         system_prompt: Optional[str] = "You are a helpful assistant.", # Default system prompt if not provided
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,12 +90,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, actual_model, payload)
+            return self._create_stream(request_id, created_time, actual_model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, actual_model, payload)
+            return self._create_non_stream(request_id, created_time, actual_model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -101,7 +103,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -197,14 +200,14 @@ class Completions(BaseCompletions):


     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         # Non-streaming requires accumulating stream chunks
         full_response_content = ""
         finish_reason = "stop" # Assume stop unless error occurs

         try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
+            stream_generator = self._create_stream(request_id, created_time, model, payload, timeout, proxies)
             for chunk in stream_generator:
                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                     full_response_content += chunk.choices[0].delta.content
@@ -332,4 +335,3 @@ class LLMChatCo(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
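The common thread in these provider diffs is that create() now accepts per-request timeout and proxies overrides, falling back to the client-level defaults via `timeout or self._client.timeout`. A minimal usage sketch, assuming the provider follows the openai-style interface shown above (the model name is hypothetical, for illustration only):

    from webscout.Provider.OPENAI import LLMChatCo

    client = LLMChatCo()
    response = client.chat.completions.create(
        model="gemini-flash-2.0",  # hypothetical model name
        messages=[{"role": "user", "content": "Hello"}],
        timeout=30,                                  # overrides client.timeout for this call only
        proxies={"https": "http://127.0.0.1:8080"},  # overrides client proxies for this call only
    )
    print(response.choices[0].message.content)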
webscout/Provider/OPENAI/mcpcore.py

@@ -43,6 +43,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,12 +77,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream_from_stream(request_id, created_time, model, payload)
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handles the streaming response from MCPCore."""
         final_usage_data = None # To store usage if received
@@ -90,7 +92,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110" # Impersonation often helps
             )

@@ -167,9 +170,15 @@ class Completions(BaseCompletions):
                 # system_fingerprint=..., # Can be added if available in final event
             )
             # Add usage to the final chunk dictionary representation if available
-            final_chunk_dict = final_chunk.to_dict()
+            if hasattr(final_chunk, "model_dump"):
+                final_chunk_dict = final_chunk.model_dump(exclude_none=True)
+            else:
+                final_chunk_dict = final_chunk.dict(exclude_none=True)
             if usage_obj:
-                final_chunk_dict["usage"] = usage_obj.to_dict()
+                if hasattr(usage_obj, "model_dump"):
+                    final_chunk_dict["usage"] = usage_obj.model_dump(exclude_none=True)
+                else:
+                    final_chunk_dict["usage"] = usage_obj.dict(exclude_none=True)

             # Yield the final dictionary or object as needed by downstream consumers
             # Yielding the object aligns better with the generator type hint
@@ -187,7 +196,7 @@ class Completions(BaseCompletions):
             raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e

     def _create_non_stream_from_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handles the non-streaming response by making a single POST request (like deepinfra)."""
         try:
@@ -199,7 +208,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"
             )
             if not response.ok:
@@ -380,4 +390,3 @@ class MCPCore(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
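The hasattr(..., "model_dump") branching above is a Pydantic v1/v2 compatibility shim: Pydantic v2 renamed .dict() to .model_dump(), and webscout's response objects may be built against either major version. The same technique as a standalone helper (an illustrative sketch, not webscout's code):

    from typing import Any, Dict

    def dump_model(obj: Any) -> Dict[str, Any]:
        # Pydantic v2 models expose model_dump(); v1 models only have dict().
        if hasattr(obj, "model_dump"):
            return obj.model_dump(exclude_none=True)
        return obj.dict(exclude_none=True)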
@@ -9,7 +9,7 @@ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage,
-    format_prompt
+    format_prompt, count_tokens
 )

 # Import curl_cffi for Cloudflare bypass
@@ -94,6 +94,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -132,7 +134,7 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         # Make the API request
-        response_text = self._client._make_api_request(user_message)
+        response_text = self._client._make_api_request(user_message, timeout=timeout, proxies=proxies)

         # If streaming is requested, simulate streaming with the full response
         if stream:
@@ -154,9 +156,9 @@ class Completions(BaseCompletions):
         message = ChatCompletionMessage(role="assistant", content=response_text)
         choice = Choice(index=0, message=message, finish_reason="stop")

-        # Estimate token usage (this is approximate)
-        prompt_tokens = len(user_message) // 4 # Rough estimate
-        completion_tokens = len(response_text) // 4 # Rough estimate
+        # Estimate token usage using count_tokens
+        prompt_tokens = count_tokens(user_message)
+        completion_tokens = count_tokens(response_text)
         total_tokens = prompt_tokens + completion_tokens

         usage = CompletionUsage(
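Several providers in this release replace the old len(text) // 4 heuristic with a shared count_tokens helper imported from .utils. The diff does not show that helper's body; one plausible shape, assuming a tiktoken-backed count with the old heuristic as a fallback (hypothetical sketch, the real implementation may differ):

    def count_tokens(text: str) -> int:
        # Prefer an exact BPE count when tiktoken is available.
        try:
            import tiktoken
            return len(tiktoken.get_encoding("cl100k_base").encode(text))
        except Exception:
            # Fall back to the rough chars/4 estimate the old code used.
            return len(text) // 4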
webscout/Provider/OPENAI/netwrck.py

@@ -15,7 +15,8 @@ from .utils import (
     ChoiceDelta,
     CompletionUsage,
     format_prompt,
-    get_system_prompt
+    get_system_prompt,
+    count_tokens
 )

 # ANSI escape codes for formatting
@@ -36,6 +37,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -60,19 +63,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 stream=True
             )
             response.raise_for_status()
@@ -91,7 +95,7 @@ class Completions(BaseCompletions):
                     # Format the decoded line using the client's formatter
                     formatted_content = self._client.format_text(decoded_line)
                     streaming_text += formatted_content
-                    completion_tokens += len(formatted_content) // 4 # Rough estimate
+                    completion_tokens += count_tokens(formatted_content)

                     # Create a delta object for this chunk
                     delta = ChoiceDelta(content=formatted_content)
@@ -126,14 +130,15 @@ class Completions(BaseCompletions):
             raise IOError(f"Netwrck request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -142,9 +147,9 @@ class Completions(BaseCompletions):
         # Format the full response using the client's formatter
         full_response = self._client.format_text(raw_response)

-        # Create usage statistics (estimated)
-        prompt_tokens = len(payload["query"]) // 4
-        completion_tokens = len(full_response) // 4
+        # Create usage statistics using count_tokens
+        prompt_tokens = count_tokens(payload.get("query", ""))
+        completion_tokens = count_tokens(full_response)
         total_tokens = prompt_tokens + completion_tokens

         usage = CompletionUsage(
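Note that `proxies=proxies or getattr(self._client, "proxies", None)` leans on a standard requests feature: keyword arguments to Session.post apply per call and take precedence over session-level settings. A standalone illustration (not webscout code; the proxy URLs are placeholders):

    import requests

    session = requests.Session()
    session.proxies = {"https": "http://default-proxy:3128"}  # session-wide default

    resp = session.post(
        "https://netwrck.com/api/chatpred_or",
        json={"query": "hello"},
        timeout=15,                                    # per-call timeout
        proxies={"https": "http://other-proxy:8080"},  # per-call override
    )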
webscout/Provider/OPENAI/oivscode.py (new file)

@@ -0,0 +1,290 @@
+import random
+import secrets
+import requests
+import json
+import time
+import uuid
+import string
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# --- oivscode Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'oivscode'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _post_with_retry(self, payload, stream=False, timeout=None, proxies=None):
+        """
+        Try all endpoints until one succeeds or all fail.
+        """
+        last_exception = None
+        for endpoint in self._client.api_endpoints:
+            try:
+                response = self._client.session.post(
+                    endpoint,
+                    headers=self._client.headers,
+                    json=payload,
+                    stream=stream,
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
+                )
+                response.raise_for_status()
+                self._client.base_url = endpoint # Update to working endpoint
+                return response
+            except requests.exceptions.RequestException as e:
+                last_exception = e
+                continue
+        raise IOError(f"All oivscode endpoints failed: {last_exception}") from last_exception
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._post_with_retry(payload, stream=True, timeout=timeout, proxies=proxies)
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+
+                    if decoded_line.startswith("data: "):
+                        json_str = decoded_line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+
+                            if hasattr(chunk, "model_dump"):
+                                chunk_dict = chunk.model_dump(exclude_none=True)
+                            else:
+                                chunk_dict = chunk.dict(exclude_none=True)
+
+                            usage_dict = {
+                                "prompt_tokens": prompt_tokens or 10,
+                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
+                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                                "estimated_cost": None
+                            }
+
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                                usage_dict["completion_tokens"] = completion_tokens
+                                usage_dict["total_tokens"] = total_tokens
+
+                            chunk_dict["usage"] = usage_dict
+
+                            yield chunk
+                        except json.JSONDecodeError:
+                            print(f"Warning: Could not decode JSON line: {json_str}")
+                            continue
+        except requests.exceptions.RequestException as e:
+            print(f"Error during oivscode stream request: {e}")
+            raise IOError(f"oivscode request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing oivscode stream: {e}")
+            raise
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._post_with_retry(payload, stream=False, timeout=timeout, proxies=proxies)
+            data = response.json()
+
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except requests.exceptions.RequestException as e:
+            print(f"Error during oivscode non-stream request: {e}")
+            raise IOError(f"oivscode request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing oivscode response: {e}")
+            raise
+
+class Chat(BaseChat):
+    def __init__(self, client: 'oivscode'):
+        self.completions = Completions(client)
+
+class oivscode(OpenAICompatibleProvider):
+
+    AVAILABLE_MODELS = [
+        "*",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-5-sonnet-20241022",
+        "claude-3-7-sonnet-20250219",
+        "custom/blackbox-base",
+        "custom/blackbox-pro",
+        "custom/blackbox-pro-designer",
+        "custom/blackbox-pro-plus",
+        "deepseek-r1",
+        "deepseek-v3",
+        "deepseek/deepseek-chat",
+        "gemini-2.5-pro-preview-03-25",
+        "gpt-4o-mini",
+        "grok-3-beta",
+        "image-gen",
+        "llama-4-maverick-17b-128e-instruct-fp8",
+        "o1",
+        "o3-mini",
+        "o4-mini",
+        "transcribe",
+        "anthropic/claude-sonnet-4"
+    ]
+
+    def __init__(self, timeout: Optional[int] = None):
+        self.timeout = timeout
+        self.api_endpoints = [
+            "https://oi-vscode-server.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-5.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"
+        ]
+        self.api_endpoint = random.choice(self.api_endpoints)
+        self.base_url = self.api_endpoint
+        self.session = requests.Session()
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+        }
+        self.userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
+        self.headers["userid"] = self.userid
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    # Example usage
+    client = oivscode()
+    chat = client.chat
+    response = chat.completions.create(
+        model="Qwen/Qwen2.5-72B-Instruct-Turbo",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=50,
+        stream=False
+    )
+    print(response)
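Two design points in this new provider are worth noting: _post_with_retry walks api_endpoints in order and pins base_url to the first endpoint that answers, and _create_stream yields ChatCompletionChunk objects, so streaming consumption mirrors the OpenAI client. A short streaming sketch, assuming the class behaves as defined above:

    client = oivscode(timeout=60)
    for chunk in client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hi in five words."}],
        stream=True,
    ):
        delta = chunk.choices[0].delta
        if delta and delta.content:  # role-only deltas carry no content
            print(delta.content, end="", flush=True)
    print()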