webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (197):
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -9,7 +9,7 @@ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
9
  from .utils import (
10
10
  ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
11
  ChatCompletionMessage, CompletionUsage,
12
- format_prompt, get_system_prompt # Import format_prompt and get_system_prompt
12
+ format_prompt, get_system_prompt, count_tokens # Import format_prompt, get_system_prompt and count_tokens
13
13
  )
14
14
 
15
15
  # Import LitAgent for browser fingerprinting
@@ -36,6 +36,8 @@ class Completions(BaseCompletions):
36
36
  max_tokens: Optional[int] = None,
37
37
  stream: bool = False,
38
38
  temperature: Optional[float] = None,
39
+ timeout: Optional[int] = None,
40
+ proxies: Optional[Dict[str, str]] = None,
39
41
  **kwargs: Any
40
42
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
41
43
  """
@@ -67,16 +69,18 @@ class Completions(BaseCompletions):
67
69
  created_time = int(time.time())
68
70
 
69
71
  if stream:
70
- return self._create_streaming(request_id, created_time, model, payload)
72
+ return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
71
73
  else:
72
- return self._create_non_streaming(request_id, created_time, model, payload)
74
+ return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
73
75
 
74
76
  def _create_streaming(
75
77
  self,
76
78
  request_id: str,
77
79
  created_time: int,
78
80
  model: str,
79
- payload: Dict[str, Any]
81
+ payload: Dict[str, Any],
82
+ timeout: Optional[int] = None,
83
+ proxies: Optional[Dict[str, str]] = None
80
84
  ) -> Generator[ChatCompletionChunk, None, None]:
81
85
  """Implementation for streaming chat completions."""
82
86
  try:
@@ -86,7 +90,8 @@ class Completions(BaseCompletions):
86
90
  headers=self._client.headers,
87
91
  json=payload,
88
92
  stream=True,
89
- timeout=self._client.timeout,
93
+ timeout=timeout or self._client.timeout,
94
+ proxies=proxies or getattr(self._client, "proxies", None),
90
95
  impersonate="chrome120"
91
96
  )
92
97
 
@@ -161,7 +166,9 @@ class Completions(BaseCompletions):
161
166
  request_id: str,
162
167
  created_time: int,
163
168
  model: str,
164
- payload: Dict[str, Any]
169
+ payload: Dict[str, Any],
170
+ timeout: Optional[int] = None,
171
+ proxies: Optional[Dict[str, str]] = None
165
172
  ) -> ChatCompletion:
166
173
  """Implementation for non-streaming chat completions."""
167
174
  try:
@@ -171,7 +178,8 @@ class Completions(BaseCompletions):
171
178
  headers=self._client.headers,
172
179
  json=payload,
173
180
  stream=True,
174
- timeout=self._client.timeout,
181
+ timeout=timeout or self._client.timeout,
182
+ proxies=proxies or getattr(self._client, "proxies", None),
175
183
  impersonate="chrome120"
176
184
  )
177
185
 
@@ -194,8 +202,8 @@ class Completions(BaseCompletions):
194
202
  full_text = full_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
195
203
 
196
204
  # Estimate token counts
197
- prompt_tokens = len(payload.get("prompt", "").split()) + len(payload.get("systemPrompt", "").split())
198
- completion_tokens = len(full_text.split())
205
+ prompt_tokens = count_tokens(payload.get("prompt", "")) + count_tokens(payload.get("systemPrompt", ""))
206
+ completion_tokens = count_tokens(full_text)
199
207
  total_tokens = prompt_tokens + completion_tokens
200
208
 
201
209
  # Create the message object
@@ -352,4 +360,3 @@ class TypefullyAI(OpenAICompatibleProvider):
352
360
  def list(inner_self):
353
361
  return type(self).AVAILABLE_MODELS
354
362
  return _ModelList()
355
-
@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
8
8
  from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
9
  from .utils import (
10
10
  ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
- ChatCompletionMessage, CompletionUsage
11
+ ChatCompletionMessage, CompletionUsage, count_tokens
12
12
  )
13
13
 
14
14
  # Attempt to import LitAgent, fallback if not available
@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
34
34
  top_p: Optional[float] = None,
35
35
  presence_penalty: Optional[float] = None,
36
36
  frequency_penalty: Optional[float] = None,
37
+ timeout: Optional[int] = None,
38
+ proxies: Optional[Dict[str, str]] = None,
37
39
  **kwargs: Any
38
40
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
39
41
  """
@@ -61,12 +63,12 @@ class Completions(BaseCompletions):
61
63
  created_time = int(time.time())
62
64
 
63
65
  if stream:
64
- return self._create_stream(request_id, created_time, model, payload)
66
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
65
67
  else:
66
- return self._create_non_stream(request_id, created_time, model, payload)
68
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
67
69
 
68
70
  def _create_stream(
69
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
71
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
70
72
  ) -> Generator[ChatCompletionChunk, None, None]:
71
73
  try:
72
74
  response = self._client.session.post(
@@ -74,7 +76,8 @@ class Completions(BaseCompletions):
74
76
  headers=self._client.headers,
75
77
  json=payload,
76
78
  stream=True,
77
- timeout=self._client.timeout
79
+ timeout=timeout or self._client.timeout,
80
+ proxies=proxies or getattr(self._client, "proxies", None)
78
81
  )
79
82
 
80
83
  # Handle non-200 responses
@@ -90,7 +93,7 @@ class Completions(BaseCompletions):
90
93
 
91
94
  # Estimate prompt tokens based on message length
92
95
  for msg in payload.get("messages", []):
93
- prompt_tokens += len(msg.get("content", "").split())
96
+ prompt_tokens += count_tokens(msg.get("content", ""))
94
97
 
95
98
  for line in response.iter_lines():
96
99
  if not line:
@@ -140,8 +143,11 @@ class Completions(BaseCompletions):
140
143
  system_fingerprint=data.get('system_fingerprint')
141
144
  )
142
145
 
143
- # Convert to dict for proper formatting
144
- chunk_dict = chunk.to_dict()
146
+ # Convert chunk to dict using Pydantic's API
147
+ if hasattr(chunk, "model_dump"):
148
+ chunk_dict = chunk.model_dump(exclude_none=True)
149
+ else:
150
+ chunk_dict = chunk.dict(exclude_none=True)
145
151
 
146
152
  # Add usage information to match OpenAI format
147
153
  usage_dict = {
@@ -188,7 +194,10 @@ class Completions(BaseCompletions):
188
194
  system_fingerprint=None
189
195
  )
190
196
 
191
- chunk_dict = chunk.to_dict()
197
+ if hasattr(chunk, "model_dump"):
198
+ chunk_dict = chunk.model_dump(exclude_none=True)
199
+ else:
200
+ chunk_dict = chunk.dict(exclude_none=True)
192
201
  chunk_dict["usage"] = {
193
202
  "prompt_tokens": prompt_tokens,
194
203
  "completion_tokens": completion_tokens,
@@ -203,14 +212,15 @@ class Completions(BaseCompletions):
203
212
  raise IOError(f"TypeGPT request failed: {e}") from e
204
213
 
205
214
  def _create_non_stream(
206
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
215
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
207
216
  ) -> ChatCompletion:
208
217
  try:
209
218
  response = self._client.session.post(
210
219
  self._client.api_endpoint,
211
220
  headers=self._client.headers,
212
221
  json=payload,
213
- timeout=self._client.timeout
222
+ timeout=timeout or self._client.timeout,
223
+ proxies=proxies or getattr(self._client, "proxies", None)
214
224
  )
215
225
 
216
226
  # Handle non-200 responses