webscout-8.3.5-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/scira_chat.py CHANGED
@@ -16,52 +16,18 @@ class SciraAI(Provider):
     """
     A class to interact with the Scira AI chat API.
     """
-
+    required_auth = False
     # Model mapping: actual model names to Scira API format
     MODEL_MAPPING = {
         "grok-3-mini": "scira-default",
-        "grok-3-mini-fast": "scira-x-fast-mini",
-        "grok-3-fast": "scira-x-fast",
-        "gpt-4.1-nano": "scira-nano",
-        "grok-3": "scira-grok-3",
-        "grok-4": "scira-grok-4",
-        "grok-2-vision-1212": "scira-vision",
-        "grok-2-latest": "scira-g2",
-        "gpt-4o-mini": "scira-4o-mini",
-        "o4-mini-2025-04-16": "scira-o4-mini",
-        "o3": "scira-o3",
-        "qwen/qwen3-32b": "scira-qwen-32b",
-        "qwen3-30b-a3b": "scira-qwen-30b",
-        "deepseek-v3-0324": "scira-deepseek-v3",
-        "claude-3-5-haiku-20241022": "scira-haiku",
-        "mistral-small-latest": "scira-mistral",
-        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
-        "gemini-2.5-flash": "scira-google",
-        "gemini-2.5-pro": "scira-google-pro",
-        "claude-sonnet-4-20250514": "scira-anthropic",
-        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
-        "claude-4-opus-20250514": "scira-opus",
-        "claude-4-opus-20250514-pro": "scira-opus-pro",
-        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
-        "kimi-k2-instruct": "scira-kimi-k2",
-        "scira-kimi-k2": "kimi-k2-instruct",
+        "llama-4-maverick": "scira-llama-4",
+        "qwen3-4b": "scira-qwen-4b",
+        "qwen3-32b": "scira-qwen-32b",
+        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
     }

     # Reverse mapping: Scira format to actual model names
     SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
-    # Add special cases for aliases and duplicate mappings
-    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
-    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
-    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
-    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
-    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
-    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
-    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
-    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
-    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
-    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
-    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
-    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
     # Available models list (actual model names + scira aliases)
     AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())

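Note: the trimmed MODEL_MAPPING above is still inverted into SCIRA_TO_MODEL, so both canonical names and Scira aliases remain accepted. A minimal standalone sketch of that resolution, using the mapping values from this hunk (`resolve_model` is a hypothetical helper for illustration, not part of webscout):

```python
# Forward map copied from this hunk; the reverse map is derived exactly as above.
MODEL_MAPPING = {
    "grok-3-mini": "scira-default",
    "llama-4-maverick": "scira-llama-4",
    "qwen3-4b": "scira-qwen-4b",
    "qwen3-32b": "scira-qwen-32b",
    "qwen3-4b-thinking": "scira-qwen-4b-thinking",
}
SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}

def resolve_model(name: str) -> str:
    """Return the Scira API alias for either a canonical name or an alias."""
    if name in MODEL_MAPPING:
        return MODEL_MAPPING[name]
    if name in SCIRA_TO_MODEL:
        return name  # already a Scira alias
    raise ValueError(f"Unknown model: {name}")

assert resolve_model("qwen3-32b") == "scira-qwen-32b"
assert resolve_model("scira-default") == "scira-default"
```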
@@ -139,19 +105,21 @@ class SciraAI(Provider):

         # Use the fingerprint for headers
         self.headers = {
-            "Accept": self.fingerprint["accept"],
+            "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
             "Content-Type": "application/json",
             "Origin": "https://scira.ai",
             "Referer": "https://scira.ai/",
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
             "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-CH-UA-Platform": '"Windows"',
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin"
+            "Sec-Fetch-Site": "same-origin",
+            "DNT": "1",
+            "Priority": "u=1, i"
         }

         self.session = Session() # Use curl_cffi Session
@@ -196,13 +164,13 @@ class SciraAI(Provider):
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         self.fingerprint = self.agent.generate_fingerprint(browser)

-        # Update headers with new fingerprint
+        # Update headers with new fingerprint (keeping the updated values)
         self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Sec-CH-UA": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
+            "Sec-CH-UA-Platform": '"Windows"',
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0",
         })

         # Update session headers
@@ -213,19 +181,18 @@ class SciraAI(Provider):

     @staticmethod
     def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[dict]:
-        """Extracts g and 0 chunks from the Scira stream format.
-        Returns a dict: {"g": [g1, g2, ...], "0": zero} if present.
+        """Extracts JSON chunks from the Scira stream format.
+        Returns a dict with the parsed JSON data.
         """
         if isinstance(chunk, str):
-            g_matches = re.findall(r'g:"(.*?)"', chunk)
-            zero_match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
-            result = {}
-            if g_matches:
-                result["g"] = [g.encode().decode('unicode_escape').replace('\\', '\\').replace('\\"', '"') for g in g_matches]
-            if zero_match:
-                result["0"] = zero_match.group(1).encode().decode('unicode_escape').replace('\\', '\\').replace('\\"', '"')
-            if result:
-                return result
+            if chunk.startswith("data: "):
+                json_str = chunk[6:].strip() # Remove "data: " prefix
+                if json_str == "[DONE]":
+                    return {"type": "done"}
+                try:
+                    return json.loads(json_str)
+                except json.JSONDecodeError:
+                    return None
         return None

     def ask(
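Note: the rewritten `_scira_extractor` drops the old `g:"…"`/`0:"…"` regex parsing in favour of standard SSE `data:` lines carrying JSON. A minimal sketch of how such chunks parse, mirroring the logic above; the sample payloads are invented for illustration:

```python
import json
from typing import Optional

def extract_sse_chunk(chunk: str) -> Optional[dict]:
    """Parse one 'data: {...}' SSE line into a dict; mirrors _scira_extractor."""
    if chunk.startswith("data: "):
        json_str = chunk[6:].strip()  # strip the "data: " prefix
        if json_str == "[DONE]":
            return {"type": "done"}
        try:
            return json.loads(json_str)
        except json.JSONDecodeError:
            return None
    return None

# Invented sample lines in the shape this diff handles:
print(extract_sse_chunk('data: {"type": "text-delta", "delta": "Hello"}'))
# -> {'type': 'text-delta', 'delta': 'Hello'}
print(extract_sse_chunk("data: [DONE]"))  # -> {'type': 'done'}
print(extract_sse_chunk("not sse"))       # -> None
```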
@@ -246,8 +213,7 @@ class SciraAI(Provider):
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         messages = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}], "id": str(uuid.uuid4())[:16]}
         ]

         # Prepare the request payload
@@ -257,7 +223,9 @@
             "model": self.model,
             "group": self.search_mode,
             "user_id": self.user_id,
-            "timezone": "Asia/Calcutta"
+            "timezone": "Asia/Calcutta",
+            "isCustomInstructionsEnabled": False,
+            "searchProvider": "parallel"
         }

         def for_stream():
@@ -306,41 +274,37 @@ class SciraAI(Provider):
                     if content is None:
                         continue
                     if isinstance(content, dict):
-                        # Handle g chunks
-                        g_chunks = content.get("g", [])
-                        zero_chunk = content.get("0")
-                        if g_chunks:
+                        event_type = content.get("type")
+                        if event_type == "reasoning-start":
                             if not in_think:
                                 if raw:
                                     yield "<think>\n\n"
                                 else:
                                     yield "<think>\n\n"
                                 in_think = True
-                            for g in g_chunks:
+                        elif event_type == "reasoning-delta":
+                            if in_think:
+                                delta = content.get("delta", "")
                                 if raw:
-                                    yield g
+                                    yield delta
                                 else:
-                                    yield dict(text=g)
-                        if zero_chunk is not None:
+                                    yield dict(text=delta)
+                        elif event_type == "reasoning-end":
                             if in_think:
                                 if raw:
                                     yield "</think>\n\n"
                                 else:
                                     yield "</think>\n\n"
                                 in_think = False
+                        elif event_type == "text-delta":
+                            delta = content.get("delta", "")
                             if raw:
-                                yield zero_chunk
+                                yield delta
                             else:
-                                streaming_response += zero_chunk
-                                yield dict(text=zero_chunk)
-                    else:
-                        # fallback for old string/list logic
-                        if raw:
-                            yield content
-                        else:
-                            if content and isinstance(content, str):
-                                streaming_response += content
-                                yield dict(text=content)
+                                streaming_response += delta
+                                yield dict(text=delta)
+                        elif event_type == "done":
+                            break # End of stream
                 if not raw:
                     self.last_response = {"text": streaming_response}
                     self.conversation.update_chat_history(prompt, streaming_response)
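Note: the streaming loop above is effectively a small state machine: `reasoning-start`/`reasoning-end` toggle an `in_think` flag that wraps reasoning deltas in `<think>` tags, while `text-delta` events accumulate the visible answer. A standalone sketch driving the same logic over invented events (no network involved):

```python
# Invented events in the shape this diff handles.
events = [
    {"type": "reasoning-start"},
    {"type": "reasoning-delta", "delta": "thinking..."},
    {"type": "reasoning-end"},
    {"type": "text-delta", "delta": "Relativity says "},
    {"type": "text-delta", "delta": "time is relative."},
    {"type": "done"},
]

in_think, answer = False, ""  # answer plays the role of streaming_response
for event in events:
    etype = event.get("type")
    if etype == "reasoning-start":
        in_think = True
        print("<think>", end="")
    elif etype == "reasoning-delta" and in_think:
        print(event.get("delta", ""), end="")
    elif etype == "reasoning-end":
        in_think = False
        print("</think> ", end="")
    elif etype == "text-delta":
        answer += event.get("delta", "")
        print(event.get("delta", ""), end="")
    elif etype == "done":
        break
print()  # -> <think>thinking...</think> Relativity says time is relative.
```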
@@ -415,43 +379,6 @@
         return response.get("text", "")

 if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(SciraAI.AVAILABLE_MODELS)
-
-    for model in SciraAI.AVAILABLE_MODELS:
-        try:
-            test_ai = SciraAI(model=model, timeout=60)
-            # Test stream first
-            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
-            for chunk in response_stream:
-                response_text += chunk
-                # Optional: print chunks as they arrive for visual feedback
-                # print(chunk, end="", flush=True)
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response_text.strip() # Already decoded in get_message
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗ (Stream)"
-                display_text = "Empty or invalid stream response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-
-            # Optional: Add non-stream test if needed, but stream test covers basic functionality
-            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
-            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
-            # if not response_non_stream or len(response_non_stream.strip()) == 0:
-            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
-
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
+    ai = SciraAI(model="grok-3-mini", is_conversation=True, system_prompt="You are a helpful assistant.")
+    for resp in ai.chat("Explain the theory of relativity in simple terms.", stream=True, raw=False):
+        print(resp, end="", flush=True)
webscout/Provider/searchchat.py CHANGED
@@ -15,6 +15,7 @@ class SearchChatAI(Provider):
     """
     A class to interact with the SearchChatAI API.
     """
+    required_auth = False
     AVAILABLE_MODELS = ["gpt-4o-mini-2024-07-18"]
     def __init__(
         self,
webscout/Provider/sonus.py CHANGED
@@ -12,7 +12,7 @@ class SonusAI(Provider):
     """
     A class to interact with the Sonus AI chat API.
     """
-
+    required_auth = False
     AVAILABLE_MODELS = [
         "pro",
         "air",
webscout/Provider/toolbaz.py CHANGED
@@ -21,29 +21,33 @@ class Toolbaz(Provider):
     A class to interact with the Toolbaz API. Supports streaming responses.
     """

+    required_auth = False
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
+        "gemini-2.5-pro",
         "gemini-2.0-flash-thinking",
-        "sonar",
         "gemini-2.0-flash",
-        "gemini-1.5-flash",
+
+        "claude-sonnet-4",
+
+        "gpt-5",
+        "gpt-oss-120b",
         "o3-mini",
         "gpt-4o-latest",
-        "gpt-4o",
+
+        "toolbaz_v4",
+        "toolbaz_v3.5_pro",
+
         "deepseek-r1",
+        "deepseek-v3.1",
+        "deepseek-v3",
+
         "Llama-4-Maverick",
-        "Llama-4-Scout",
         "Llama-3.3-70B",
-        "Qwen2.5-72B",
-        "grok-2-1212",
-        "grok-3-beta",
-        "toolbaz_v3",
-        "toolbaz_v3.5_pro",
-        "toolbaz_v4",
+
         "mixtral_8x22b",
         "L3-70B-Euryale-v2.1",
         "midnight-rose",
-        "unity",
         "unfiltered_x"
     ]

webscout/Provider/turboseek.py CHANGED
@@ -1,19 +1,21 @@
+
+import re
+from typing import Optional, Union, Any, AsyncGenerator, Dict
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
-import json

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Optional, Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent

 class TurboSeek(Provider):
     """
     This class provides methods for interacting with the TurboSeek API.
     """
+    required_auth = False
     AVAILABLE_MODELS = ["Llama 3.1 70B"]

     def __init__(
@@ -58,13 +60,14 @@ class TurboSeek(Provider):
             "dnt": "1",
             "origin": "https://www.turboseek.io",
             "priority": "u=1, i",
-            "referer": "https://www.turboseek.io/?ref=taaft&utm_source=taaft&utm_medium=referral",
-            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "referer": "https://www.turboseek.io/",
+            "sec-ch-ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
             "user-agent": LitAgent().random(),
         }
@@ -88,11 +91,27 @@ class TurboSeek(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _strip_html_tags(text: str) -> str:
+        """Remove HTML tags from text."""
+        import re
+        # Remove HTML tags and entities
+        text = re.sub(r'<[^>]*>', '', text)
+        text = re.sub(r'&[^;]+;', ' ', text)
+        text = re.sub(r'\s+', ' ', text).strip()
+        return text
+
     @staticmethod
     def _turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from TurboSeek stream JSON objects."""
         if isinstance(chunk, dict) and "text" in chunk:
-            return chunk.get("text") # json.loads already handles unicode escapes
+            text = chunk.get("text")
+            if text:
+                # Clean HTML tags from the response
+                return TurboSeek._strip_html_tags(str(text))
+        elif isinstance(chunk, str):
+            # Handle raw string content
+            return TurboSeek._strip_html_tags(chunk)
         return None

     def ask(
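Note: `_strip_html_tags` is a plain three-step regex scrub: drop tags, collapse entities to a space, squeeze whitespace. A quick standalone check of that behaviour, mirroring the method above:

```python
import re

def strip_html_tags(text: str) -> str:
    """Same three-step scrub as TurboSeek._strip_html_tags above."""
    text = re.sub(r'<[^>]*>', '', text)       # drop tags
    text = re.sub(r'&[^;]+;', ' ', text)      # entities -> space
    text = re.sub(r'\s+', ' ', text).strip()  # squeeze whitespace
    return text

print(strip_html_tags("<p>Hello&nbsp;<b>world</b></p>"))  # -> Hello world
```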
@@ -155,7 +174,9 @@
                     to_json=True,
                     content_extractor=self._turboseek_extractor,
                     yield_raw_on_error=False,
-                    raw=raw
+                    raw=raw,
+                    extract_regexes=[r'<[^>]*>([^<]*)<[^>]*>', r'([^<]+)'],
+                    skip_regexes=[r'<script[^>]*>.*?</script>', r'<style[^>]*>.*?</style>']
                 )
                 for content_chunk in processed_stream:
                     if isinstance(content_chunk, bytes):
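Note: the `sanitize_stream` call now also passes `extract_regexes` and `skip_regexes`, so the shared pipeline can pull text out of HTML-ish chunks and drop script/style noise. A rough illustration with plain `re` of what those two filter lists express; `filter_chunk` is a hypothetical stand-in, and sanitize_stream's actual internal application order may differ:

```python
import re

extract_regexes = [r'<[^>]*>([^<]*)<[^>]*>', r'([^<]+)']
skip_regexes = [r'<script[^>]*>.*?</script>', r'<style[^>]*>.*?</style>']

def filter_chunk(chunk: str):
    """Skip-then-extract, as the two parameter lists above suggest."""
    if any(re.search(p, chunk, re.DOTALL) for p in skip_regexes):
        return None  # drop script/style chunks entirely
    for pattern in extract_regexes:
        m = re.search(pattern, chunk)
        if m:
            return m.group(1)
    return None

print(filter_chunk("<b>hello</b>"))               # -> hello
print(filter_chunk("<script>var x=1;</script>"))  # -> None
print(filter_chunk("plain text"))                 # -> plain text
```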
@@ -247,19 +268,7 @@
 if __name__ == '__main__':
     # Ensure curl_cffi is installed
     from rich import print
-    try: # Add try-except block for testing
-        ai = TurboSeek(timeout=60)
-        print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("yooooooooooo", stream=True, raw=False)
-        for chunk in response_stream:
-            print(chunk, end="", flush=True)
-        # Optional: Test non-stream
-        # print("[bold blue]Testing Non-Stream:[/bold blue]")
-        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
-        # print(response_non_stream)
-        # print("[bold green]Non-Stream Test Complete.[/bold green]")
-
-    except exceptions.FailedToGenerateResponseError as e:
-        print(f"\n[bold red]API Error:[/bold red] {e}")
-    except Exception as e:
-        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
+    ai = TurboSeek(timeout=60)
+    response_stream = ai.chat("How can I get a 6 pack in 3 months?", stream=True, raw=False)
+    for chunk in response_stream:
+        print(chunk, end="", flush=True)
webscout/Provider/typefully.py CHANGED
@@ -12,6 +12,7 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError

 class TypefullyAI(Provider):
+    required_auth = False
     AVAILABLE_MODELS = ["openai:gpt-4o-mini", "openai:gpt-4o", "anthropic:claude-3-5-haiku-20241022", "groq:llama-3.3-70b-versatile"]

     def __init__(
@@ -204,4 +205,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
+            print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
webscout/Provider/x0gpt.py CHANGED
@@ -27,6 +27,7 @@ class X0GPT(Provider):
     >>> print(response)
     'The weather today is sunny with a high of 75°F.'
     """
+    required_auth = False
     AVAILABLE_MODELS = ["UNKNOWN"]

     def __init__(
webscout/Provider/yep.py CHANGED
@@ -21,6 +21,7 @@ class YEPCHAT(Provider):
         AVAILABLE_MODELS (list): List of available models for the provider.
     """

+    required_auth = False
     AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]

     def __init__(
@@ -366,7 +367,7 @@
     # except Exception as e:
     #     print(f"{model:<50} {'✗':<10} {str(e)}")
     ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B", timeout=60)
-    response = ai.chat("Say 'Hello' in one word", raw=True, stream=True)
+    response = ai.chat("Say 'Hello' in one word", raw=False, stream=True)
    for chunk in response:

        print(chunk, end='', flush=True)
webscout/conversation.py CHANGED
@@ -165,29 +165,19 @@
         ))

     def _compress_history(self) -> None:
-        """Compress history when it exceeds threshold."""
+        """Delete old history when it exceeds threshold."""
         if len(self.messages) > self.compression_threshold:
-            # Keep recent messages and summarize older ones
-            keep_recent = 100 # Adjust based on needs
-            self.messages = (
-                [self._summarize_messages(self.messages[:-keep_recent])] +
-                self.messages[-keep_recent:]
-            )
-
-    def _summarize_messages(self, messages: List[Message]) -> Message:
-        """Create a summary message from older messages."""
-        return Message(
-            role="system",
-            content="[History Summary] Previous conversation summarized for context",
-            metadata={"summarized_count": len(messages)}
-        )
+            # Remove oldest messages, keep only the most recent ones
+            self.messages = self.messages[-self.compression_threshold:]
+
+    # _summarize_messages removed

     def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
         """Generate complete prompt with enhanced context management."""
         if not self.status:
             return prompt

-        intro = intro or self.intro
+        intro = intro or self.intro or ""

         # Add tool information if available
         tools_description = self.get_tools_description()
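Note: `_compress_history` no longer synthesizes a summary message; it simply truncates to the newest `compression_threshold` messages. A tiny sketch of the new behaviour with a stand-in message type (the real `Message` class carries more fields, and the threshold value here is invented):

```python
from dataclasses import dataclass

@dataclass
class Msg:                 # stand-in for webscout's Message
    role: str
    content: str

compression_threshold = 3  # illustrative value
messages = [Msg("user", f"m{i}") for i in range(6)]

if len(messages) > compression_threshold:
    # keep only the most recent messages, as in the new _compress_history
    messages = messages[-compression_threshold:]

print([m.content for m in messages])  # -> ['m3', 'm4', 'm5']
```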
@@ -260,6 +250,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o

     def _trim_chat_history(self, chat_history: str, intro: str) -> str:
         """Trim chat history with improved token management."""
+        intro = intro or ""
         total_length = len(intro) + len(chat_history)

         if total_length > self.history_offset:
@@ -273,20 +264,31 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         return chat_history

     def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
-        """Add a message with enhanced validation and metadata support."""
+        """Add a message with enhanced validation and metadata support. Deletes oldest messages if total word count exceeds max_tokens_to_sample."""
         try:
             role = role.lower() # Normalize role to lowercase
             if not self.validate_message(role, content):
                 raise MessageValidationError("Invalid message role or content")

+            # Calculate total word count in history
+            def total_word_count(messages):
+                return sum(len(msg.content.split()) for msg in messages)
+
+            # Remove oldest messages until total word count is below limit
+            temp_messages = self.messages.copy()
+            while temp_messages and (total_word_count(temp_messages) + len(content.split()) > self.max_tokens_to_sample):
+                temp_messages.pop(0)
+
+            self.messages = temp_messages
+
             message = Message(role=role, content=content, metadata=metadata or {})
             self.messages.append(message)
-
+
             if self.file and self.update_file:
                 self._append_to_file(message)
-
+
             self._compress_history()
-
+
         except Exception as e:
             raise ConversationError(f"Failed to add message: {str(e)}") from e

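Note: `add_message` now budgets history by word count: before appending, it pops the oldest messages until the existing words plus the incoming message fit under `max_tokens_to_sample`. A standalone sketch of that loop, with invented values and a stand-in message type (word counts are only a rough token proxy):

```python
from dataclasses import dataclass

@dataclass
class Msg:                  # stand-in for webscout's Message
    role: str
    content: str

def total_word_count(messages):
    return sum(len(m.content.split()) for m in messages)

max_tokens_to_sample = 6    # illustrative budget
history = [Msg("user", "one two three"), Msg("assistant", "four five six")]
incoming = "seven eight"

# Pop oldest messages until the new message fits, as in add_message above.
while history and total_word_count(history) + len(incoming.split()) > max_tokens_to_sample:
    history.pop(0)

history.append(Msg("user", incoming))
print([m.content for m in history])  # -> ['four five six', 'seven eight']
```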
webscout/sanitize.py CHANGED
@@ -143,7 +143,7 @@ def _process_chunk(
     if to_json:
         try:
             # Only strip before JSON parsing if both boundaries are incorrect
-            if sanitized_chunk[0] not in '{[' and sanitized_chunk[-1] not in '}]':
+            if len(sanitized_chunk) >= 2 and sanitized_chunk[0] not in '{[' and sanitized_chunk[-1] not in '}]':
                 sanitized_chunk = sanitized_chunk.strip()
             return json.loads(sanitized_chunk)
         except (json.JSONDecodeError, Exception) as e:
@@ -646,13 +646,14 @@
                 f"Stream must yield strings or bytes, not {type(first_item).__name__}"
             )

-    async for line in line_iterator:
-        if not line:
-            continue
-        buffer += line
-        while True:
-            if not found_start and start_marker:
-                idx = buffer.find(start_marker)
+    try:
+        async for line in line_iterator:
+            if not line:
+                continue
+            buffer += line
+            while True:
+                if not found_start and start_marker:
+                    idx = buffer.find(start_marker)
                 if idx != -1:
                     found_start = True
                     buffer = buffer[idx + len(start_marker) :]
@@ -735,6 +736,9 @@
                         break
                 else:
                     break
+    except Exception as e:
+        import sys
+        print(f"Async stream processing error: {str(e)}", file=sys.stderr)


 def sanitize_stream(
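Note: the two hunks above wrap the `async for` consumption in try/except, so a mid-stream transport error is logged to stderr instead of propagating out of the generator. A minimal runnable sketch of that pattern with a simulated failure (`flaky_stream` and `consume` are invented for illustration):

```python
import asyncio
import sys

async def flaky_stream():
    yield "chunk-1"
    yield "chunk-2"
    raise RuntimeError("connection dropped")  # simulated mid-stream failure

async def consume():
    try:
        async for line in flaky_stream():
            print("got:", line)
    except Exception as e:
        # mirror the new behavior: log and end the stream quietly
        print(f"Async stream processing error: {e}", file=sys.stderr)

asyncio.run(consume())  # prints two chunks, then logs the error
```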
@@ -937,7 +941,7 @@
             payload, intro_value, to_json, skip_markers, strip_chars,
             start_marker, end_marker, content_extractor, yield_raw_on_error,
             encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
-            skip_regexes, extract_regexes,
+            skip_regexes, extract_regexes, raw,
         )

     # Handle async iterables
@@ -966,6 +970,7 @@

 # --- Decorator version of sanitize_stream ---
 import functools
+import asyncio
 from typing import overload

 def _sanitize_stream_decorator(
@@ -1057,7 +1062,6 @@ sanitize_stream_decorator = _sanitize_stream_decorator
 lit_streamer = _sanitize_stream_decorator

 # Allow @sanitize_stream and @lit_streamer as decorators
-import asyncio
 sanitize_stream.__decorator__ = _sanitize_stream_decorator
 LITSTREAM.__decorator__ = _sanitize_stream_decorator
 lit_streamer.__decorator__ = _sanitize_stream_decorator