webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -17,19 +17,74 @@ class SciraAI(Provider):
     A class to interact with the Scira AI chat API.
     """

-    AVAILABLE_MODELS = {
-        "scira-default": "Grok3-mini",  # thinking model
-        "scira-grok-3": "Grok3",
-        "scira-anthropic": "Claude 4 Sonnet",
-        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking",  # thinking model
-        "scira-vision": "Grok2-Vision",  # vision model
-        "scira-4o": "GPT4o",
-        "scira-qwq": "QWQ-32B",
-        "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash Thinking",  # thinking model
-        "scira-google-pro": "gemini 2.5 pro",
-        "scira-llama-4": "llama 4 Maverick",
+    # Model mapping: actual model names to Scira API format
+    MODEL_MAPPING = {
+        "grok-3-mini": "scira-default",
+        "grok-3-mini-fast": "scira-x-fast-mini",
+        "grok-3-fast": "scira-x-fast",
+        "gpt-4.1-nano": "scira-nano",
+        "grok-3": "scira-grok-3",
+        "grok-4": "scira-grok-4",
+        "grok-2-vision-1212": "scira-vision",
+        "grok-2-latest": "scira-g2",
+        "gpt-4o-mini": "scira-4o-mini",
+        "o4-mini-2025-04-16": "scira-o4-mini",
+        "o3": "scira-o3",
+        "qwen/qwen3-32b": "scira-qwen-32b",
+        "qwen3-30b-a3b": "scira-qwen-30b",
+        "deepseek-v3-0324": "scira-deepseek-v3",
+        "claude-3-5-haiku-20241022": "scira-haiku",
+        "mistral-small-latest": "scira-mistral",
+        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
+        "gemini-2.5-flash": "scira-google",
+        "gemini-2.5-pro": "scira-google-pro",
+        "claude-sonnet-4-20250514": "scira-anthropic",
+        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
+        "claude-4-opus-20250514": "scira-opus",
+        "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
     }
+
+    # Reverse mapping: Scira format to actual model names
+    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    # Add special cases for aliases and duplicate mappings
+    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
+    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
+    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
+    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
+    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
+    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
+    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
+    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
+    # Available models list (actual model names + scira aliases)
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
+
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model name to its Scira API format.
+
+        Args:
+            model: Either an actual model name or a Scira alias
+
+        Returns:
+            The Scira API format model name
+
+        Raises:
+            ValueError: If the model is not supported
+        """
+        # If it's already a Scira format, return as-is
+        if model in cls.SCIRA_TO_MODEL:
+            return model
+
+        # If it's an actual model name, convert to Scira format
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+
+        # Model not found
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")

     def __init__(
         self,
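The mapping is now bidirectional: `_resolve_model` accepts either a canonical model name or a Scira alias and returns the alias the API expects. A minimal sketch of that resolution logic, trimmed to two entries (the full tables are in the hunk above):

```python
# Sketch of the bidirectional resolution added in 8.3.4 (two entries only;
# the real MODEL_MAPPING above has ~25 entries plus special-case aliases).
MODEL_MAPPING = {
    "grok-3-mini": "scira-default",
    "gemini-2.5-pro": "scira-google-pro",
}
SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}

def resolve_model(model: str) -> str:
    if model in SCIRA_TO_MODEL:   # already a Scira alias, pass through
        return model
    if model in MODEL_MAPPING:    # canonical name -> Scira alias
        return MODEL_MAPPING[model]
    raise ValueError(f"Invalid model: {model}")

assert resolve_model("grok-3-mini") == "scira-default"
assert resolve_model("scira-google-pro") == "scira-google-pro"
```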
@@ -42,7 +97,7 @@ class SciraAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "scira-default",
+        model: str = "grok-3-mini",
         chat_id: str = None,
         user_id: str = None,
         browser: str = "chrome",
@@ -67,9 +122,9 @@ class SciraAI(Provider):
            system_prompt (str): System prompt for the AI.

        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
+        # Resolve the model to Scira format
+        self.model = self._resolve_model(model)
+
        self.url = "https://scira.ai/api/search"

        # Initialize LitAgent for user agent generation
@@ -103,7 +158,6 @@ class SciraAI(Provider):
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
-       self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

@@ -154,14 +208,20 @@ class SciraAI(Provider):
        return self.fingerprint

    @staticmethod
-   def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-       """Extracts content from the Scira stream format '0:"..."'."""
+   def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[dict]:
+       """Extracts g and 0 chunks from the Scira stream format.
+       Returns a dict: {"g": [g1, g2, ...], "0": zero} if present.
+       """
        if isinstance(chunk, str):
-           match = re.search(r'0:"(.*?)"(?=,|$)', chunk)  # Look for 0:"...", possibly followed by comma or end of string
-           if match:
-               # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
-               content = match.group(1).encode().decode('unicode_escape')
-               return content.replace('\\\\', '\\').replace('\\"', '"')
+           g_matches = re.findall(r'g:"(.*?)"', chunk)
+           zero_match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
+           result = {}
+           if g_matches:
+               result["g"] = [g.encode().decode('unicode_escape').replace('\\\\', '\\').replace('\\"', '"') for g in g_matches]
+           if zero_match:
+               result["0"] = zero_match.group(1).encode().decode('unicode_escape').replace('\\\\', '\\').replace('\\"', '"')
+           if result:
+               return result
        return None

    def ask(
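The extractor now returns a dict rather than a plain string, separating reasoning (`g:"..."`) chunks from answer (`0:"..."`) chunks. A quick illustration of the two regexes against a hypothetical wire chunk (the sample payload is invented; the patterns are the ones from the hunk above):

```python
import re

# Hypothetical chunk in Scira's wire format: g:"..." carries reasoning,
# 0:"..." carries answer text, as implied by the extractor's regexes.
chunk = 'g:"Let me think",g:"about this",0:"Hello"'

g_matches = re.findall(r'g:"(.*?)"', chunk)
zero_match = re.search(r'0:"(.*?)"(?=,|$)', chunk)

print(g_matches)            # ['Let me think', 'about this']
print(zero_match.group(1))  # 'Hello'
```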
@@ -169,7 +229,9 @@ class SciraAI(Provider):
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
-   ) -> Dict[str, Any]:  # Note: Stream parameter removed as API doesn't seem to support it
+       stream: bool = True,  # Default to True, always stream
+       raw: bool = False,  # Added raw parameter
+   ) -> Union[Dict[str, Any], Any]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
@@ -194,107 +256,198 @@ class SciraAI(Provider):
            "timezone": "Asia/Calcutta"
        }

-       try:
-           # Use curl_cffi post with impersonate
-           response = self.session.post(
-               self.url,
-               json=payload,
-               timeout=self.timeout,
-               impersonate="chrome120"  # Add impersonate
-           )
-           if response.status_code != 200:
-               # Try to get response content for better error messages
-               try:  # Use try-except for reading response content
-                   error_content = response.text
-               except:
-                   error_content = "<could not read response content>"
-
-               if response.status_code in [403, 429]:
-                   print(f"Received status code {response.status_code}, refreshing identity...")
-                   self.refresh_identity()
-                   response = self.session.post(
-                       self.url, json=payload, timeout=self.timeout,
-                       impersonate="chrome120"  # Add impersonate to retry
-                   )
-                   if not response.ok:
+       def for_stream():
+           try:
+               response = self.session.post(
+                   self.url,
+                   json=payload,
+                   timeout=self.timeout,
+                   impersonate="chrome120",
+                   stream=True
+               )
+               if response.status_code != 200:
+                   try:
+                       error_content = response.text
+                   except:
+                       error_content = "<could not read response content>"
+
+                   if response.status_code in [403, 429]:
+                       print(f"Received status code {response.status_code}, refreshing identity...")
+                       self.refresh_identity()
+                       response = self.session.post(
+                           self.url, json=payload, timeout=self.timeout,
+                           impersonate="chrome120", stream=True
+                       )
+                       if not response.ok:
+                           raise exceptions.FailedToGenerateResponseError(
+                               f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
+                           )
+                       print("Identity refreshed successfully.")
+                   else:
                        raise exceptions.FailedToGenerateResponseError(
-                           f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
+                           f"Request failed with status code {response.status_code}. Response: {error_content}"
                        )
-                   print("Identity refreshed successfully.")
-               else:
-                   raise exceptions.FailedToGenerateResponseError(
-                       f"Request failed with status code {response.status_code}. Response: {error_content}"
-                   )
-
-           response_text_raw = response.text  # Get raw response text
-
-           # Process the text using sanitize_stream line by line
-           processed_stream = sanitize_stream(
-               data=response_text_raw.splitlines(),  # Split into lines
-               intro_value=None,  # No simple prefix
-               to_json=False,  # Content is not JSON
-               content_extractor=self._scira_extractor  # Use the specific extractor
-           )

-           # Aggregate the results from the generator
+               processed_stream = sanitize_stream(
+                   data=response.iter_content(chunk_size=None),
+                   intro_value=None,
+                   to_json=False,
+                   content_extractor=self._scira_extractor,
+                   raw=raw
+               )
+
+               streaming_response = ""
+               in_think = False
+               for content in processed_stream:
+                   if content is None:
+                       continue
+                   if isinstance(content, dict):
+                       # Handle g chunks
+                       g_chunks = content.get("g", [])
+                       zero_chunk = content.get("0")
+                       if g_chunks:
+                           if not in_think:
+                               if raw:
+                                   yield "<think>\n\n"
+                               else:
+                                   yield "<think>\n\n"
+                               in_think = True
+                           for g in g_chunks:
+                               if raw:
+                                   yield g
+                               else:
+                                   yield dict(text=g)
+                       if zero_chunk is not None:
+                           if in_think:
+                               if raw:
+                                   yield "</think>\n\n"
+                               else:
+                                   yield "</think>\n\n"
+                               in_think = False
+                           if raw:
+                               yield zero_chunk
+                           else:
+                               streaming_response += zero_chunk
+                               yield dict(text=zero_chunk)
+                   else:
+                       # fallback for old string/list logic
+                       if raw:
+                           yield content
+                       else:
+                           if content and isinstance(content, str):
+                               streaming_response += content
+                               yield dict(text=content)
+               if not raw:
+                   self.last_response = {"text": streaming_response}
+                   self.conversation.update_chat_history(prompt, streaming_response)
+           except CurlError as e:
+               raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+           except Exception as e:
+               raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+       def for_non_stream():
+           # Always use streaming logic, but aggregate the result
            full_response = ""
-           for content in processed_stream:
-               if content and isinstance(content, str):
-                   full_response += content
-
-           self.last_response = {"text": full_response}
-           self.conversation.update_chat_history(prompt, full_response)
-           return {"text": full_response}
-       except CurlError as e:  # Catch CurlError
-           raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-       except Exception as e:
-           raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+           for chunk in for_stream():
+               if raw:
+                   if isinstance(chunk, str):
+                       full_response += chunk
+               else:
+                   if isinstance(chunk, dict) and "text" in chunk:
+                       full_response += chunk["text"]
+           if not raw:
+               self.last_response = {"text": full_response}
+               self.conversation.update_chat_history(prompt, full_response)
+               return {"text": full_response}
+           else:
+               return full_response
+
+       return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
-   ) -> str:
-       return self.get_message(
-           self.ask(
-               prompt, optimizer=optimizer, conversationally=conversationally
+       stream: bool = True,  # Default to True, always stream
+       raw: bool = False,  # Added raw parameter
+   ) -> Any:
+       def for_stream():
+           for response in self.ask(
+               prompt, optimizer=optimizer, conversationally=conversationally, stream=True, raw=raw
+           ):
+               if raw:
+                   yield response
+               else:
+                   if isinstance(response, dict):
+                       yield self.get_message(response)
+                   else:
+                       # For <think> and </think> tags (strings), yield as is
+                       yield response
+       def for_non_stream():
+           result = self.ask(
+               prompt, optimizer=optimizer, conversationally=conversationally, stream=False, raw=raw
            )
-       )
+           if raw:
+               return result
+           else:
+               if isinstance(result, dict):
+                   return self.get_message(result)
+               else:
+                   return result
+       return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
+       """
+       Retrieves message only from response
+
+       Args:
+           response (dict): Response generated by `self.ask`
+
+       Returns:
+           str: Message extracted
+       """
        assert isinstance(response, dict), "Response should be of dict data-type only"
-       # Extractor handles formatting
-       return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
+       return response.get("text", "")

 if __name__ == "__main__":
-    print("-" * 100)
+    print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 100)
-
-    test_prompt = "Say 'Hello' in one word"
-
-    # Test each model
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(SciraAI.AVAILABLE_MODELS)
+
     for model in SciraAI.AVAILABLE_MODELS:
-        print(f"\rTesting {model}...", end="")
-
         try:
-            test_ai = SciraAI(model=model, timeout=120)  # Increased timeout
-            response = test_ai.chat(test_prompt)
-
-            if response and len(response.strip()) > 0:
+            test_ai = SciraAI(model=model, timeout=60)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
+                response_text += chunk
+                # Optional: print chunks as they arrive for visual feedback
+                # print(chunk, end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
                 status = "✓"
                 # Clean and truncate response
-                clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                clean_text = response_text.strip()  # Already decoded in get_message
                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed, but stream test covers basic functionality
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-            error_msg = str(e)
-            # Truncate very long error messages
-            if len(error_msg) > 100:
-                error_msg = error_msg[:97] + "..."
-            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+
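Taken together, `ask()` and `chat()` now stream by default, and reasoning models interleave literal `"<think>\n\n"` / `"</think>\n\n"` marker strings with the text chunks. Typical consumption would look like this (a sketch; the top-level import path is assumed from the package layout, not shown in this diff):

```python
from webscout import SciraAI  # import path assumed, not part of this diff

ai = SciraAI(model="grok-3-mini")  # the old alias "scira-default" still works

# chat() streams by default; every yielded chunk is a plain string, with
# reasoning wrapped between the "<think>" and "</think>" marker strings.
for chunk in ai.chat("What is 2 + 2?", stream=True):
    print(chunk, end="", flush=True)
```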
@@ -185,24 +185,24 @@ class SCNet(Provider):
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
+       raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream_chat():
-           # ask() yields dicts or strings when streaming
-           gen = self.ask(
-               prompt, stream=True, raw=False,  # Ensure ask yields dicts
-               optimizer=optimizer, conversationally=conversationally
-           )
-           for response_dict in gen:
-               yield self.get_message(response_dict)  # get_message expects dict
-
+           for response in self.ask(
+               prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+           ):
+               if raw:
+                   yield response
+               else:
+                   yield self.get_message(response)
        def for_non_stream_chat():
-           # ask() returns dict or str when not streaming
            response_data = self.ask(
-               prompt, stream=False, raw=False,  # Ensure ask returns dict
-               optimizer=optimizer, conversationally=conversationally
+               prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
            )
-           return self.get_message(response_data)  # get_message expects dict
-
+           if raw:
+               return response_data if isinstance(response_data, str) else self.get_message(response_data)
+           else:
+               return self.get_message(response_data)
        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
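SCNet's `chat()` is the template for a raw-passthrough pattern that the SearchChatAI, SonusAI, and Toolbaz hunks below repeat almost verbatim: `raw=True` forwards wire chunks untouched, while `raw=False` reduces `{"text": ...}` dicts to plain strings. Distilled into a standalone sketch (the provider class and its fake `ask()` are illustrative, not from the diff):

```python
# Generic form of the raw-passthrough convention these providers now share.
class DemoProvider:
    def ask(self, prompt, stream=False, raw=False):
        # Stand-in for a real API call: dicts when raw=False, strings when raw=True.
        chunks = ["Hel", "lo"] if raw else [{"text": "Hel"}, {"text": "lo"}]
        if stream:
            return iter(chunks)
        return "".join(chunks) if raw else {"text": "Hello"}

    def get_message(self, response):
        return response["text"]

    def chat(self, prompt, stream=False, raw=False):
        def for_stream_chat():
            for response in self.ask(prompt, stream=True, raw=raw):
                yield response if raw else self.get_message(response)

        def for_non_stream_chat():
            response_data = self.ask(prompt, stream=False, raw=raw)
            if raw and isinstance(response_data, str):
                return response_data
            return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

print(list(DemoProvider().chat("hi", stream=True)))  # ['Hel', 'lo']
```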
@@ -232,6 +232,7 @@ class SearchChatAI(Provider):
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
+       raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """
        Chat with the API.
@@ -246,22 +247,21 @@ class SearchChatAI(Provider):
            Either a string response or a generator for streaming
        """
        def for_stream_chat():
-           # ask() yields dicts or strings when streaming
-           gen = self.ask(
-               prompt, stream=True, raw=False,  # Ensure ask yields dicts
-               optimizer=optimizer, conversationally=conversationally
-           )
-           for response_dict in gen:
-               yield self.get_message(response_dict)  # get_message expects dict
-
+           for response in self.ask(
+               prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+           ):
+               if raw:
+                   yield response
+               else:
+                   yield self.get_message(response)
        def for_non_stream_chat():
-           # ask() returns dict or str when not streaming
            response_data = self.ask(
-               prompt, stream=False, raw=False,  # Ensure ask returns dict
-               optimizer=optimizer, conversationally=conversationally
+               prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
            )
-           return self.get_message(response_data)  # get_message expects dict
-
+           if raw:
+               return response_data if isinstance(response_data, str) else self.get_message(response_data)
+           else:
+               return self.get_message(response_data)
        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
@@ -208,23 +208,24 @@ class SonusAI(Provider):
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
+       raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream_chat():
-           # ask() yields dicts when raw=False
-           for response_dict in self.ask(
-               prompt, stream=True, raw=False,  # Ensure ask yields dicts
-               optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+           for response in self.ask(
+               prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
            ):
-               yield self.get_message(response_dict)
-
+               if raw:
+                   yield response
+               else:
+                   yield self.get_message(response)
        def for_non_stream_chat():
-           # ask() returns dict or str when raw=False/True
            response_data = self.ask(
-               prompt, stream=False, raw=False,  # Ensure ask returns dict
-               optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+               prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
            )
-           return self.get_message(response_data)  # get_message expects dict
-
+           if raw:
+               return response_data if isinstance(response_data, str) else self.get_message(response_data)
+           else:
+               return self.get_message(response_data)
        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
@@ -24,6 +24,7 @@ class Toolbaz(Provider):
    AVAILABLE_MODELS = [
        "gemini-2.5-flash",
        "gemini-2.0-flash-thinking",
+       "sonar",
        "gemini-2.0-flash",
        "gemini-1.5-flash",
        "o3-mini",
@@ -223,14 +224,22 @@ class Toolbaz(Provider):
                intro_value=None,  # No simple prefix
                to_json=False,  # Content is text
                content_extractor=self._toolbaz_extractor,  # Use the tag remover
-               yield_raw_on_error=True  # Yield even if extractor somehow fails (though unlikely for regex)
+               yield_raw_on_error=True,  # Yield even if extractor somehow fails (though unlikely for regex)
+               raw=raw
            )

            for content_chunk in processed_stream:
                # content_chunk is the string with tags removed
-               if content_chunk and isinstance(content_chunk, str):
-                   streaming_text += content_chunk
-                   yield {"text": content_chunk} if not raw else content_chunk
+               if isinstance(content_chunk, bytes):
+                   content_chunk = content_chunk.decode('utf-8', errors='ignore')
+               if content_chunk is None:
+                   continue
+               if raw:
+                   yield content_chunk
+               else:
+                   if content_chunk and isinstance(content_chunk, str):
+                       streaming_text += content_chunk
+                       yield {"text": content_chunk}

            self.last_response = {"text": streaming_text}
            self.conversation.update_chat_history(prompt, streaming_text)
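With `raw=raw` now forwarded into `sanitize_stream`, the loop can see `bytes` or `None` chunks as well as strings, hence the normalization guard above. In isolation (sample inputs invented):

```python
# The chunk-normalization guard from the loop above, in isolation.
def normalize(chunk):
    if isinstance(chunk, bytes):
        chunk = chunk.decode("utf-8", errors="ignore")
    return chunk if chunk else None  # None/empty means "skip this chunk"

for sample in (b"hel", "lo", None, b""):
    piece = normalize(sample)
    if piece is None:
        continue
    print(piece, end="")  # prints "hello"
```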
@@ -274,28 +283,36 @@ class Toolbaz(Provider):
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
+       raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """Generates a response from the Toolbaz API."""
        def for_stream_chat():
            # ask() yields dicts when raw=False
-           for response_dict in self.ask(
+           for response in self.ask(
                prompt,
                stream=True,
-               raw=False,  # Ensure ask yields dicts
+               raw=raw,
                optimizer=optimizer,
                conversationally=conversationally
            ):
-               yield self.get_message(response_dict)
+               if raw:
+                   yield response
+               else:
+                   yield self.get_message(response)

        def for_non_stream_chat():
            # ask() returns a dict when stream=False
            response_dict = self.ask(
                prompt,
                stream=False,
+               raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
-           return self.get_message(response_dict)
+           if raw:
+               return response_dict
+           else:
+               return self.get_message(response_dict)

        return for_stream_chat() if stream else for_non_stream_chat()