webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout has been flagged as potentially problematic; consult the registry's security advisory for details.

Files changed (150):
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
@@ -392,6 +392,13 @@ class Toolbaz(OpenAICompatibleProvider):
392
392
  print(f"{RED}Error getting Toolbaz authentication: {e}{RESET}")
393
393
  return None
394
394
 
395
+ @property
396
+ def models(self):
397
+ class _ModelList:
398
+ def list(inner_self):
399
+ return type(self).AVAILABLE_MODELS
400
+ return _ModelList()
401
+
395
402
  # Example usage
396
403
  if __name__ == "__main__":
397
404
  # Test the provider
@@ -0,0 +1,355 @@
1
+ import time
2
+ import uuid
3
+ import json
4
+ import re
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import base classes and utility structures
8
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from .utils import (
10
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
+ ChatCompletionMessage, CompletionUsage,
12
+ format_prompt, get_system_prompt # Import format_prompt and get_system_prompt
13
+ )
14
+
15
+ # Import LitAgent for browser fingerprinting
16
+ from webscout.litagent import LitAgent
17
+
18
+ # Import curl_cffi for better request handling
19
+ from curl_cffi.requests import Session
20
+ from curl_cffi import CurlError
21
+
22
+ # ANSI escape codes for formatting
23
+ BOLD = "\033[1m"
24
+ RED = "\033[91m"
25
+ RESET = "\033[0m"
26
+
27
+ class Completions(BaseCompletions):
28
+ def __init__(self, client: 'TypefullyAI'):
29
+ self._client = client
30
+
31
+ def create(
32
+ self,
33
+ *,
34
+ model: str,
35
+ messages: List[Dict[str, str]],
36
+ max_tokens: Optional[int] = None,
37
+ stream: bool = False,
38
+ temperature: Optional[float] = None,
39
+ **kwargs: Any
40
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
41
+ """
42
+ Creates a model response for the given chat conversation.
43
+ Mimics openai.chat.completions.create
44
+ """
45
+ # Extract system message using get_system_prompt utility
46
+ system_prompt = get_system_prompt(messages) or self._client.system_prompt
47
+
48
+ # Format the conversation using format_prompt utility
49
+ # Use add_special_tokens=True to format as "User: ... Assistant: ..."
50
+ # Use do_continue=True to ensure it ends with "Assistant: " for model to continue
51
+ conversation_prompt = format_prompt(
52
+ messages,
53
+ add_special_tokens=True,
54
+ do_continue=True,
55
+ include_system=False # System prompt is sent separately
56
+ )
57
+
58
+ # Prepare the payload for Typefully API
59
+ payload = {
60
+ "prompt": conversation_prompt,
61
+ "systemPrompt": system_prompt,
62
+ "modelIdentifier": self._client.convert_model_name(model),
63
+ "outputLength": max_tokens if max_tokens is not None else self._client.output_length
64
+ }
65
+
66
+ request_id = f"chatcmpl-{uuid.uuid4()}"
67
+ created_time = int(time.time())
68
+
69
+ if stream:
70
+ return self._create_streaming(request_id, created_time, model, payload)
71
+ else:
72
+ return self._create_non_streaming(request_id, created_time, model, payload)
73
+
74
+ def _create_streaming(
75
+ self,
76
+ request_id: str,
77
+ created_time: int,
78
+ model: str,
79
+ payload: Dict[str, Any]
80
+ ) -> Generator[ChatCompletionChunk, None, None]:
81
+ """Implementation for streaming chat completions."""
82
+ try:
83
+ # Make the streaming request
84
+ response = self._client.session.post(
85
+ self._client.api_endpoint,
86
+ headers=self._client.headers,
87
+ json=payload,
88
+ stream=True,
89
+ timeout=self._client.timeout,
90
+ impersonate="chrome120"
91
+ )
92
+
93
+ if not response.ok:
94
+ raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
95
+
96
+ streaming_text = ""
97
+
98
+ for chunk in response.iter_content(chunk_size=None):
99
+ if not chunk:
100
+ continue
101
+
102
+ chunk_str = chunk.decode('utf-8', errors='replace')
103
+ content = self._client._typefully_extractor(chunk_str)
104
+
105
+ if content:
106
+ streaming_text += content
107
+
108
+ # Create the delta object
109
+ delta = ChoiceDelta(
110
+ content=content,
111
+ role="assistant"
112
+ )
113
+
114
+ # Create the choice object
115
+ choice = Choice(
116
+ index=0,
117
+ delta=delta,
118
+ finish_reason=None
119
+ )
120
+
121
+ # Create the chunk object
122
+ chunk = ChatCompletionChunk(
123
+ id=request_id,
124
+ choices=[choice],
125
+ created=created_time,
126
+ model=model
127
+ )
128
+
129
+ yield chunk
130
+
131
+ # Final chunk with finish_reason="stop"
132
+ delta = ChoiceDelta(
133
+ content=None,
134
+ role=None
135
+ )
136
+
137
+ choice = Choice(
138
+ index=0,
139
+ delta=delta,
140
+ finish_reason="stop"
141
+ )
142
+
143
+ chunk = ChatCompletionChunk(
144
+ id=request_id,
145
+ choices=[choice],
146
+ created=created_time,
147
+ model=model
148
+ )
149
+
150
+ yield chunk
151
+
152
+ except CurlError as e:
153
+ print(f"{RED}Error during Typefully streaming request (CurlError): {e}{RESET}")
154
+ raise IOError(f"Typefully streaming request failed (CurlError): {e}") from e
155
+ except Exception as e:
156
+ print(f"{RED}Error during Typefully streaming request: {e}{RESET}")
157
+ raise IOError(f"Typefully streaming request failed: {e}") from e
158
+
159
+ def _create_non_streaming(
160
+ self,
161
+ request_id: str,
162
+ created_time: int,
163
+ model: str,
164
+ payload: Dict[str, Any]
165
+ ) -> ChatCompletion:
166
+ """Implementation for non-streaming chat completions."""
167
+ try:
168
+ # Make the non-streaming request
169
+ response = self._client.session.post(
170
+ self._client.api_endpoint,
171
+ headers=self._client.headers,
172
+ json=payload,
173
+ stream=True,
174
+ timeout=self._client.timeout,
175
+ impersonate="chrome120"
176
+ )
177
+
178
+ if not response.ok:
179
+ raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")
180
+
181
+ # Collect the full response
182
+ full_text = ""
183
+ for chunk in response.iter_content(chunk_size=None):
184
+ if not chunk:
185
+ continue
186
+
187
+ chunk_str = chunk.decode('utf-8', errors='replace')
188
+ content = self._client._typefully_extractor(chunk_str)
189
+
190
+ if content:
191
+ full_text += content
192
+
193
+ # Format the text (replace escaped newlines)
194
+ full_text = full_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
195
+
196
+ # Estimate token counts
197
+ prompt_tokens = len(payload.get("prompt", "").split()) + len(payload.get("systemPrompt", "").split())
198
+ completion_tokens = len(full_text.split())
199
+ total_tokens = prompt_tokens + completion_tokens
200
+
201
+ # Create the message object
202
+ message = ChatCompletionMessage(
203
+ role="assistant",
204
+ content=full_text
205
+ )
206
+
207
+ # Create the choice object
208
+ choice = Choice(
209
+ index=0,
210
+ message=message,
211
+ finish_reason="stop"
212
+ )
213
+
214
+ # Create the usage object
215
+ usage = CompletionUsage(
216
+ prompt_tokens=prompt_tokens,
217
+ completion_tokens=completion_tokens,
218
+ total_tokens=total_tokens
219
+ )
220
+
221
+ # Create the completion object
222
+ completion = ChatCompletion(
223
+ id=request_id,
224
+ choices=[choice],
225
+ created=created_time,
226
+ model=model,
227
+ usage=usage,
228
+ )
229
+
230
+ return completion
231
+
232
+ except CurlError as e:
233
+ print(f"{RED}Error during Typefully non-streaming request (CurlError): {e}{RESET}")
234
+ raise IOError(f"Typefully request failed (CurlError): {e}") from e
235
+ except Exception as e:
236
+ print(f"{RED}Error during Typefully non-streaming request: {e}{RESET}")
237
+ raise IOError(f"Typefully request failed: {e}") from e
238
+
239
+ class Chat(BaseChat):
240
+ def __init__(self, client: 'TypefullyAI'):
241
+ self.completions = Completions(client)
242
+
243
+ class TypefullyAI(OpenAICompatibleProvider):
244
+ """
245
+ OpenAI-compatible client for Typefully AI API.
246
+
247
+ Usage:
248
+ client = TypefullyAI()
249
+ response = client.chat.completions.create(
250
+ model="openai:gpt-4o-mini",
251
+ messages=[{"role": "user", "content": "Hello!"}]
252
+ )
253
+ print(response.choices[0].message.content)
254
+ """
255
+
256
+ AVAILABLE_MODELS = [
257
+ "openai:gpt-4o-mini",
258
+ "openai:gpt-4o",
259
+ "anthropic:claude-3-5-haiku-20241022",
260
+ "groq:llama-3.3-70b-versatile"
261
+ ]
262
+
263
+ def __init__(
264
+ self,
265
+ timeout: int = 30,
266
+
267
+ ):
268
+ """
269
+ Initialize the TypefullyAI client.
270
+
271
+ Args:
272
+ timeout: Request timeout in seconds
273
+ proxies: Optional proxy configuration
274
+ system_prompt: Default system prompt
275
+ output_length: Maximum length of the generated output
276
+ """
277
+ self.timeout = timeout
278
+ self.api_endpoint = "https://typefully.com/tools/ai/api/completion"
279
+
280
+ # Initialize curl_cffi Session
281
+ self.session = Session()
282
+
283
+ # Initialize LitAgent for user agent generation
284
+ agent = LitAgent()
285
+ self.user_agent = agent.random()
286
+
287
+ # Set headers
288
+ self.headers = {
289
+ "authority": "typefully.com",
290
+ "accept": "*/*",
291
+ "accept-encoding": "gzip, deflate, br, zstd",
292
+ "accept-language": "en-US,en;q=0.9",
293
+ "content-type": "application/json",
294
+ "dnt": "1",
295
+ "origin": "https://typefully.com",
296
+ "referer": "https://typefully.com/tools/ai/chat-gpt-alternative",
297
+ "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
298
+ "sec-ch-ua-mobile": "?0",
299
+ "sec-ch-ua-platform": '"Windows"',
300
+ "user-agent": self.user_agent
301
+ }
302
+
303
+ # Update curl_cffi session headers and proxies
304
+ self.session.headers.update(self.headers)
305
+
306
+ # Initialize chat interface
307
+ self.chat = Chat(self)
308
+
309
+ @staticmethod
310
+ def _typefully_extractor(chunk: str) -> Optional[str]:
311
+ """Extracts content from the Typefully stream format '0:"..."'."""
312
+ if isinstance(chunk, str):
313
+ match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
314
+ if match:
315
+ # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
316
+ content = match.group(1).encode().decode('unicode_escape')
317
+ return content.replace('\\\\', '\\').replace('\\"', '"')
318
+ return None
319
+
320
+ def convert_model_name(self, model: str) -> str:
321
+ """
322
+ Convert model names to ones supported by Typefully.
323
+
324
+ Args:
325
+ model: Model name to convert
326
+
327
+ Returns:
328
+ Typefully model name
329
+ """
330
+ # If the model is already a valid Typefully model, return it
331
+ if model in self.AVAILABLE_MODELS:
332
+ return model
333
+
334
+ # Map common OpenAI model names to Typefully models
335
+ model_mapping = {
336
+ "gpt-4o-mini": "openai:gpt-4o-mini",
337
+ "gpt-4o": "openai:gpt-4o",
338
+ "claude-3-5-haiku": "anthropic:claude-3-5-haiku-20241022",
339
+ "llama-3.3-70b": "groq:llama-3.3-70b-versatile"
340
+ }
341
+
342
+ if model in model_mapping:
343
+ return model_mapping[model]
344
+
345
+ # Default to the most capable model
346
+ print(f"{RED}Warning: Unknown model '{model}'. Using 'openai:gpt-4o-mini' instead.{RESET}")
347
+ return "openai:gpt-4o-mini"
348
+
349
+ @property
350
+ def models(self):
351
+ class _ModelList:
352
+ def list(inner_self):
353
+ return type(self).AVAILABLE_MODELS
354
+ return _ModelList()
355
+