webscout-7.5-py3-none-any.whl → webscout-7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. See the package registry's advisory page for more details.

Files changed (118):
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +29 -11
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/DeepSeek.py +25 -17
  24. webscout/Provider/Deepinfra.py +115 -48
  25. webscout/Provider/Gemini.py +1 -1
  26. webscout/Provider/Glider.py +25 -8
  27. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  28. webscout/Provider/HeckAI.py +23 -7
  29. webscout/Provider/Jadve.py +20 -5
  30. webscout/Provider/Netwrck.py +42 -19
  31. webscout/Provider/PI.py +4 -2
  32. webscout/Provider/Perplexitylabs.py +26 -6
  33. webscout/Provider/PizzaGPT.py +10 -51
  34. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  35. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  36. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  37. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  38. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  39. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  40. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  41. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  42. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  43. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  44. webscout/Provider/TTI/__init__.py +2 -3
  45. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  46. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  47. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  48. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  49. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  50. webscout/Provider/TTS/__init__.py +2 -2
  51. webscout/Provider/TTS/deepgram.py +12 -39
  52. webscout/Provider/TTS/elevenlabs.py +14 -40
  53. webscout/Provider/TTS/gesserit.py +11 -35
  54. webscout/Provider/TTS/murfai.py +13 -39
  55. webscout/Provider/TTS/parler.py +17 -40
  56. webscout/Provider/TTS/speechma.py +180 -0
  57. webscout/Provider/TTS/streamElements.py +17 -44
  58. webscout/Provider/TextPollinationsAI.py +39 -59
  59. webscout/Provider/Venice.py +25 -8
  60. webscout/Provider/WiseCat.py +27 -5
  61. webscout/Provider/Youchat.py +64 -37
  62. webscout/Provider/__init__.py +0 -6
  63. webscout/Provider/akashgpt.py +20 -5
  64. webscout/Provider/flowith.py +20 -5
  65. webscout/Provider/freeaichat.py +32 -45
  66. webscout/Provider/koala.py +20 -5
  67. webscout/Provider/llamatutor.py +1 -1
  68. webscout/Provider/llmchat.py +30 -8
  69. webscout/Provider/multichat.py +65 -9
  70. webscout/Provider/talkai.py +1 -0
  71. webscout/Provider/turboseek.py +3 -0
  72. webscout/Provider/tutorai.py +2 -0
  73. webscout/Provider/typegpt.py +154 -64
  74. webscout/Provider/x0gpt.py +3 -1
  75. webscout/Provider/yep.py +102 -20
  76. webscout/__init__.py +3 -0
  77. webscout/cli.py +4 -40
  78. webscout/conversation.py +1 -10
  79. webscout/litagent/__init__.py +2 -2
  80. webscout/litagent/agent.py +351 -20
  81. webscout/litagent/constants.py +34 -5
  82. webscout/litprinter/__init__.py +0 -3
  83. webscout/models.py +181 -0
  84. webscout/optimizers.py +1 -1
  85. webscout/prompt_manager.py +2 -8
  86. webscout/scout/core/scout.py +1 -4
  87. webscout/scout/core/search_result.py +1 -1
  88. webscout/scout/core/text_utils.py +1 -1
  89. webscout/scout/core.py +2 -5
  90. webscout/scout/element.py +1 -1
  91. webscout/scout/parsers/html_parser.py +1 -1
  92. webscout/scout/utils.py +0 -1
  93. webscout/swiftcli/__init__.py +1 -3
  94. webscout/tempid.py +1 -1
  95. webscout/update_checker.py +1 -3
  96. webscout/version.py +1 -1
  97. webscout/webscout_search_async.py +1 -2
  98. webscout/yep_search.py +297 -297
  99. {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
  100. {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
  101. {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
  102. webscout/Extra/autollama.py +0 -231
  103. webscout/Provider/Amigo.py +0 -274
  104. webscout/Provider/Bing.py +0 -243
  105. webscout/Provider/DiscordRocks.py +0 -253
  106. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  107. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  108. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  109. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  110. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  111. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  112. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  113. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  114. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  115. webscout/Provider/TTS/voicepod.py +0 -117
  116. {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  117. {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  118. {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
@@ -9,11 +9,10 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
9
9
  from webscout.AIbase import Provider, AsyncProvider
10
10
  from webscout import exceptions
11
11
  from webscout import LitAgent
12
- from webscout.Litlogger import Logger, LogFormat
13
12
 
14
13
  class FreeAIChat(Provider):
15
14
  """
16
- A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
15
+ A class to interact with the FreeAIChat API with LitAgent user-agent.
17
16
  """
18
17
 
19
18
  AVAILABLE_MODELS = [
@@ -23,10 +22,10 @@ class FreeAIChat(Provider):
23
22
  "gemini-1.5-pro",
24
23
  "gemini-1.5-flash",
25
24
  "gemini-2.0-pro-exp-02-05",
26
- "deepseek-r1",
25
+ # "deepseek-r1", >>>> NOT WORKING
27
26
  "deepseek-v3",
28
- "Deepseek r1 14B",
29
- "Deepseek r1 32B",
27
+ # "Deepseek r1 14B", >>>> NOT WORKING
28
+ # "Deepseek r1 32B", >>>> NOT WORKING
30
29
  "o3-mini-high",
31
30
  "o3-mini-medium",
32
31
  "o3-mini-low",
@@ -36,10 +35,10 @@ class FreeAIChat(Provider):
36
35
  "o1-mini",
37
36
  "GPT-4o",
38
37
  "Qwen coder",
39
- "Qwen 2.5 72B",
38
+ # "Qwen 2.5 72B", >>>> NOT WORKING
40
39
  "Llama 3.1 405B",
41
- "llama3.1-70b-fast",
42
- "Llama 3.3 70B",
40
+ # "llama3.1-70b-fast", >>>> NOT WORKING
41
+ # "Llama 3.3 70B", >>>> NOT WORKING
43
42
  "claude 3.5 haiku",
44
43
  "claude 3.5 sonnet",
45
44
  ]
@@ -57,9 +56,8 @@ class FreeAIChat(Provider):
57
56
  act: str = None,
58
57
  model: str = "GPT-4o",
59
58
  system_prompt: str = "You are a helpful AI assistant.",
60
- logging: bool = False
61
59
  ):
62
- """Initializes the FreeAIChat API client with logging support."""
60
+ """Initializes the FreeAIChat API client."""
63
61
  if model not in self.AVAILABLE_MODELS:
64
62
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
65
63
 
@@ -102,14 +100,6 @@ class FreeAIChat(Provider):
102
100
  )
103
101
  self.conversation.history_offset = history_offset
104
102
 
105
- self.logger = Logger(
106
- name="FreeAIChat",
107
- format=LogFormat.MODERN_EMOJI,
108
- ) if logging else None
109
-
110
- if self.logger:
111
- self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
112
-
113
103
  def ask(
114
104
  self,
115
105
  prompt: str,
@@ -124,11 +114,7 @@ class FreeAIChat(Provider):
124
114
  conversation_prompt = getattr(Optimizers, optimizer)(
125
115
  conversation_prompt if conversationally else prompt
126
116
  )
127
- if self.logger:
128
- self.logger.debug(f"Applied optimizer: {optimizer}")
129
117
  else:
130
- if self.logger:
131
- self.logger.error(f"Invalid optimizer requested: {optimizer}")
132
118
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
133
119
 
134
120
  messages = [
@@ -148,13 +134,9 @@ class FreeAIChat(Provider):
148
134
  }
149
135
 
150
136
  def for_stream():
151
- if self.logger:
152
- self.logger.debug("Sending streaming request to FreeAIChat API...")
153
137
  try:
154
138
  with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
155
139
  if response.status_code != 200:
156
- if self.logger:
157
- self.logger.error(f"Request failed with status code {response.status_code}")
158
140
  raise exceptions.FailedToGenerateResponseError(
159
141
  f"Request failed with status code {response.status_code}"
160
142
  )
@@ -177,17 +159,11 @@ class FreeAIChat(Provider):
177
159
  resp = dict(text=content)
178
160
  yield resp if raw else resp
179
161
  except json.JSONDecodeError:
180
- if self.logger:
181
- self.logger.error("JSON decode error in streaming data")
182
162
  pass
183
163
 
184
164
  self.conversation.update_chat_history(prompt, streaming_text)
185
- if self.logger:
186
- self.logger.info("Streaming response completed successfully")
187
165
 
188
166
  except requests.RequestException as e:
189
- if self.logger:
190
- self.logger.error(f"Request failed: {e}")
191
167
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
192
168
 
193
169
  def for_non_stream():
@@ -234,18 +210,29 @@ class FreeAIChat(Provider):
234
210
  except (UnicodeError, AttributeError) as e:
235
211
  return text
236
212
  return text
237
-
238
213
 
239
214
  if __name__ == "__main__":
240
- from rich import print
241
- ai = FreeAIChat(model="GPT-4o", logging=True)
242
- # response = ai.chat(input(">>>"), stream=True)
243
- # full_text = ""
244
-
245
- # for chunk in response:
246
- # corrected_chunk = ai.fix_encoding(chunk)
247
- # full_text += corrected_chunk
248
-
249
- response = ai.chat(input(">>>"), stream=False)
250
- response = ai.fix_encoding(response)
251
- print(response)
215
+ print("-" * 80)
216
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
217
+ print("-" * 80)
218
+
219
+ for model in FreeAIChat.AVAILABLE_MODELS:
220
+ try:
221
+ test_ai = FreeAIChat(model=model, timeout=60)
222
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
223
+ response_text = ""
224
+ for chunk in response:
225
+ response_text += chunk
226
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
227
+
228
+ if response_text and len(response_text.strip()) > 0:
229
+ status = "✓"
230
+ # Clean and truncate response
231
+ clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
232
+ display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
233
+ else:
234
+ status = "✗"
235
+ display_text = "Empty or invalid response"
236
+ print(f"\r{model:<50} {status:<10} {display_text}")
237
+ except Exception as e:
238
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -246,8 +246,23 @@ class KOALA(Provider):
246
246
  assert isinstance(response, dict), "Response should be of dict data-type only"
247
247
  return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
248
248
  if __name__ == '__main__':
249
- from rich import print
250
- ai = KOALA()
251
- response = ai.chat("tell me about india")
252
- for chunk in response:
253
- print(chunk, end="", flush=True)
249
+ print("-" * 80)
250
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
251
+ print("-" * 80)
252
+
253
+ for model in KOALA.AVAILABLE_MODELS:
254
+ try:
255
+ test_ai = KOALA(model=model, timeout=60)
256
+ response = test_ai.chat("Say 'Hello' in one word")
257
+ response_text = response
258
+
259
+ if response_text and len(response_text.strip()) > 0:
260
+ status = "✓"
261
+ # Truncate response if too long
262
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
263
+ else:
264
+ status = "✗"
265
+ display_text = "Empty or invalid response"
266
+ print(f"{model:<50} {status:<10} {display_text}")
267
+ except Exception as e:
268
+ print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -12,7 +12,7 @@ class LlamaTutor(Provider):
12
12
  """
13
13
  A class to interact with the LlamaTutor API (Together.ai)
14
14
  """
15
-
15
+ AVAILABLE_MODELS = ["UNKNOWN"]
16
16
  def __init__(
17
17
  self,
18
18
  is_conversation: bool = True,
@@ -18,9 +18,9 @@ class LLMChat(Provider):
18
18
  "@cf/meta/llama-3.1-70b-instruct",
19
19
  "@cf/meta/llama-3.1-8b-instruct",
20
20
  "@cf/meta/llama-3.2-3b-instruct",
21
- "@cf/meta/llama-3.2-1b-instruct"
22
- "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
23
- "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b"
21
+ "@cf/meta/llama-3.2-1b-instruct",
22
+ "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
23
+ "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
24
24
  ]
25
25
 
26
26
  def __init__(
@@ -184,8 +184,30 @@ class LLMChat(Provider):
184
184
  return response["text"]
185
185
 
186
186
  if __name__ == "__main__":
187
- from rich import print
188
- ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
189
- response = ai.chat("What's the meaning of life?", stream=True)
190
- for chunk in response:
191
- print(chunk, end="", flush=True)
187
+ print("-" * 80)
188
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
189
+ print("-" * 80)
190
+
191
+ # Test all available models
192
+ working = 0
193
+ total = len(LLMChat.AVAILABLE_MODELS)
194
+
195
+ for model in LLMChat.AVAILABLE_MODELS:
196
+ try:
197
+ test_ai = LLMChat(model=model, timeout=60)
198
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
199
+ response_text = ""
200
+ for chunk in response:
201
+ response_text += chunk
202
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
203
+
204
+ if response_text and len(response_text.strip()) > 0:
205
+ status = "✓"
206
+ # Truncate response if too long
207
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
208
+ else:
209
+ status = "✗"
210
+ display_text = "Empty or invalid response"
211
+ print(f"\r{model:<50} {status:<10} {display_text}")
212
+ except Exception as e:
213
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -66,6 +66,45 @@ MODEL_CONFIGS = {
66
66
  }
67
67
 
68
68
  class MultiChatAI(Provider):
69
+ """
70
+ A class to interact with the MultiChatAI API.
71
+ """
72
+ AVAILABLE_MODELS = [
73
+ # Llama Models
74
+ "llama-3.3-70b-versatile",
75
+ "llama-3.2-11b-vision-preview",
76
+ "deepseek-r1-distill-llama-70b",
77
+
78
+ # Cohere Models
79
+ # "command-r", >>>> NOT WORKING
80
+ # "command", >>>> NOT WORKING
81
+
82
+ # Google Models
83
+ # "gemini-1.5-flash-002", >>>> NOT WORKING
84
+ "gemma2-9b-it",
85
+ "gemini-2.0-flash",
86
+
87
+ # DeepInfra Models
88
+ "Sao10K/L3.1-70B-Euryale-v2.2",
89
+ "Gryphe/MythoMax-L2-13b",
90
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct",
91
+ "deepseek-ai/DeepSeek-V3",
92
+ "meta-llama/Meta-Llama-3.1-405B-Instruct",
93
+ "NousResearch/Hermes-3-Llama-3.1-405B",
94
+ # "gemma-2-27b-it", >>>> NOT WORKING
95
+
96
+ # Mistral Models
97
+ # "mistral-small-latest", >>>> NOT WORKING
98
+ # "codestral-latest", >>>> NOT WORKING
99
+ # "open-mistral-7b", >>>> NOT WORKING
100
+ # "open-mixtral-8x7b", >>>> NOT WORKING
101
+
102
+ # Alibaba Models
103
+ "Qwen/Qwen2.5-72B-Instruct",
104
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
105
+ "Qwen/QwQ-32B-Preview"
106
+ ]
107
+
69
108
  def __init__(
70
109
  self,
71
110
  is_conversation: bool = True,
@@ -85,6 +124,8 @@ class MultiChatAI(Provider):
85
124
  top_p: float = 1
86
125
  ):
87
126
  """Initializes the MultiChatAI API client."""
127
+ if model not in self.AVAILABLE_MODELS:
128
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
88
129
  self.session = requests.Session()
89
130
  self.is_conversation = is_conversation
90
131
  self.max_tokens_to_sample = max_tokens
@@ -258,12 +299,27 @@ class MultiChatAI(Provider):
258
299
  return str(response)
259
300
 
260
301
  if __name__ == "__main__":
261
- from rich import print
262
-
263
- # Example usage
264
- ai = MultiChatAI(model="Qwen/QwQ-32B-Preview")
265
- try:
266
- response = ai.chat("What is quantum computing?")
267
- print(response)
268
- except Exception as e:
269
- print(f"Error: {str(e)}")
302
+ print("-" * 80)
303
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
304
+ print("-" * 80)
305
+
306
+ # Test all available models
307
+ working = 0
308
+ total = len(MultiChatAI.AVAILABLE_MODELS)
309
+
310
+ for model in MultiChatAI.AVAILABLE_MODELS:
311
+ try:
312
+ test_ai = MultiChatAI(model=model, timeout=60)
313
+ response = test_ai.chat("Say 'Hello' in one word")
314
+ response_text = response
315
+
316
+ if response_text and len(response_text.strip()) > 0:
317
+ status = "✓"
318
+ # Truncate response if too long
319
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
320
+ else:
321
+ status = "✗"
322
+ display_text = "Empty or invalid response"
323
+ print(f"{model:<50} {status:<10} {display_text}")
324
+ except Exception as e:
325
+ print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -9,6 +9,7 @@ from webscout.AIutel import AwesomePrompts
9
9
  from webscout.AIbase import Provider
10
10
  from webscout import exceptions
11
11
  from webscout.litagent import LitAgent
12
+
12
13
  class Talkai(Provider):
13
14
  """
14
15
  A class to interact with the Talkai.info API.
@@ -13,6 +13,8 @@ class TurboSeek(Provider):
13
13
  """
14
14
  This class provides methods for interacting with the TurboSeek API.
15
15
  """
16
+ AVAILABLE_MODELS = ["Llama 3.1 70B"]
17
+
16
18
  def __init__(
17
19
  self,
18
20
  is_conversation: bool = True,
@@ -24,6 +26,7 @@ class TurboSeek(Provider):
24
26
  proxies: dict = {},
25
27
  history_offset: int = 10250,
26
28
  act: str = None,
29
+ model: str = "Llama 3.1 70B"
27
30
  ):
28
31
  """Instantiates TurboSeek
29
32
 
@@ -15,6 +15,7 @@ class TutorAI(Provider):
15
15
  """
16
16
  A class to interact with the TutorAI.me API.
17
17
  """
18
+ AVAILABLE_MODELS = ["gpt-4o"]
18
19
 
19
20
  def __init__(
20
21
  self,
@@ -28,6 +29,7 @@ class TutorAI(Provider):
28
29
  history_offset: int = 10250,
29
30
  act: str = None,
30
31
  system_prompt: str = "You are a helpful AI assistant.",
32
+ model: str = "gpt-4o"
31
33
  ):
32
34
  """
33
35
  Initializes the TutorAI.me API with given parameters.