webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/PI.py CHANGED
@@ -172,9 +172,10 @@ class PiAI(Provider):
172
172
  voice: bool = None,
173
173
  voice_name: str = None,
174
174
  output_file: str = None
175
- ) -> dict:
175
+ ) -> Union[dict, str, Any]:
176
176
  """
177
177
  Interact with Pi.ai by sending a prompt and receiving a response.
178
+ Now supports raw streaming and non-streaming output, matching the pattern in other providers.
178
179
 
179
180
  Args:
180
181
  prompt (str): The prompt to send
@@ -209,103 +210,82 @@ class PiAI(Provider):
209
210
  }
210
211
 
211
212
  def process_stream():
212
- try: # Add outer try block for error handling
213
- # Try primary URL first
213
+ try:
214
214
  current_url = self.url
215
215
  response = self.session.post(
216
216
  current_url,
217
- # headers are set on the session
218
- # cookies are handled by the session
219
217
  json=data,
220
218
  stream=True,
221
219
  timeout=self.timeout,
222
- # proxies are set on the session
223
- impersonate="chrome110" # Use a common impersonation profile
220
+ impersonate="chrome110"
224
221
  )
225
-
226
- # If primary URL fails, try fallback URL
227
222
  if not response.ok and current_url == self.primary_url:
228
223
  current_url = self.fallback_url
229
224
  response = self.session.post(
230
225
  current_url,
231
- # headers are set on the session
232
- # cookies are handled by the session
233
226
  json=data,
234
227
  stream=True,
235
228
  timeout=self.timeout,
236
- # proxies are set on the session
237
- impersonate="chrome110" # Use a common impersonation profile
229
+ impersonate="chrome110"
238
230
  )
239
-
240
- response.raise_for_status() # Check for HTTP errors after potential fallback
241
-
242
- # --- Process response content ---
243
- # Note: curl_cffi's response.content might behave differently for streams.
244
- # It's often better to iterate directly.
245
- # output_str = response.content.decode('utf-8') # Avoid reading full content at once for streams
231
+ response.raise_for_status()
246
232
 
247
233
  sids = []
248
234
  streaming_text = ""
249
- full_raw_data_for_sids = "" # Accumulate raw data to find SIDs later
250
-
251
- # Iterate over bytes and decode manually
235
+ full_raw_data_for_sids = ""
236
+
237
+ processed_stream = sanitize_stream(
238
+ data=response.iter_lines(),
239
+ intro_value="data: ",
240
+ to_json=True,
241
+ content_extractor=self._pi_extractor,
242
+ raw=raw
243
+ )
244
+ for content in processed_stream:
245
+ if raw:
246
+ yield content
247
+ else:
248
+ if content and isinstance(content, str):
249
+ streaming_text += content
250
+ yield {"text": streaming_text}
251
+ # SID extraction for voice
252
252
  for line_bytes in response.iter_lines():
253
253
  if line_bytes:
254
254
  line = line_bytes.decode('utf-8')
255
- full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
256
-
257
- if line.startswith("data: "):
258
- json_line_str = line[6:] # Get the JSON part as string
259
- try:
260
- # Process this single JSON line string with sanitize_stream
261
- processed_gen = sanitize_stream(
262
- data=json_line_str,
263
- to_json=True,
264
- content_extractor=self._pi_extractor
265
- )
266
- chunk_text = next(processed_gen, None) # Get the single extracted text item
267
- if chunk_text and isinstance(chunk_text, str):
268
- streaming_text += chunk_text
269
- yield {"text": streaming_text} # Always yield dict with aggregated text
270
- except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
271
- continue # Skip if sanitize_stream fails or yields nothing
272
- # Extract SIDs after processing the stream
255
+ full_raw_data_for_sids += line + "\n"
273
256
  sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
274
257
  second_sid = sids[1] if len(sids) >= 2 else None
275
-
276
258
  if voice and voice_name and second_sid:
277
259
  threading.Thread(
278
260
  target=self.download_audio_threaded,
279
261
  args=(voice_name, second_sid, output_file)
280
262
  ).start()
281
-
282
- # Update history and last response after stream finishes
283
- self.last_response = dict(text=streaming_text)
284
- self.conversation.update_chat_history(
285
- prompt, streaming_text
286
- )
287
-
288
- except CurlError as e: # Catch CurlError
263
+ if not raw:
264
+ self.last_response = dict(text=streaming_text)
265
+ self.conversation.update_chat_history(prompt, streaming_text)
266
+ except CurlError as e:
289
267
  raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
290
- except Exception as e: # Catch other potential exceptions (like HTTPError)
268
+ except Exception as e:
291
269
  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
292
270
  raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
293
271
 
294
-
295
272
  if stream:
296
273
  return process_stream()
297
274
  else:
298
- # For non-stream, collect all responses and return the final one
299
- final_text = ""
300
- # process_stream always yields dicts now
301
- for res in process_stream():
302
- if isinstance(res, dict) and "text" in res:
303
- final_text = res["text"] # Keep updating with the latest aggregated text
304
-
305
- # last_response and history are updated within process_stream
306
- # Return the final aggregated response dict or raw text
307
- return final_text if raw else self.last_response
308
-
275
+ full_response = ""
276
+ for chunk in process_stream():
277
+ if raw:
278
+ if isinstance(chunk, str):
279
+ full_response += chunk
280
+ else:
281
+ if isinstance(chunk, dict) and "text" in chunk:
282
+ full_response = chunk["text"]
283
+ if not raw:
284
+ self.last_response = {"text": full_response}
285
+ self.conversation.update_chat_history(prompt, full_response)
286
+ return self.last_response
287
+ else:
288
+ return full_response
309
289
 
310
290
  def chat(
311
291
  self,
@@ -315,8 +295,9 @@ class PiAI(Provider):
315
295
  conversationally: bool = False,
316
296
  voice: bool = None,
317
297
  voice_name: str = None,
318
- output_file: str = None
319
- ) -> str:
298
+ output_file: str = None,
299
+ raw: bool = False, # Added raw parameter
300
+ ) -> Union[str, Any]:
320
301
  """
321
302
  Generates a response based on the provided prompt.
322
303
 
@@ -339,35 +320,37 @@ class PiAI(Provider):
339
320
 
340
321
  if stream:
341
322
  def stream_generator():
342
- # ask() yields dicts or raw JSON objects when streaming
343
323
  gen = self.ask(
344
324
  prompt,
345
325
  stream=True,
346
- raw=False, # Ensure ask yields dicts for get_message
326
+ raw=raw,
347
327
  optimizer=optimizer,
348
328
  conversationally=conversationally,
349
329
  voice=voice,
350
330
  voice_name=voice_name,
351
331
  output_file=output_file
352
332
  )
353
- for response_dict in gen:
354
- # get_message expects dict
355
- yield self.get_message(response_dict)
333
+ for response in gen:
334
+ if raw:
335
+ yield response
336
+ else:
337
+ yield self.get_message(response)
356
338
  return stream_generator()
357
339
  else:
358
- # ask() returns dict or raw text when not streaming
359
340
  response_data = self.ask(
360
341
  prompt,
361
342
  stream=False,
362
- raw=False, # Ensure ask returns dict for get_message
343
+ raw=raw,
363
344
  optimizer=optimizer,
364
345
  conversationally=conversationally,
365
346
  voice=voice,
366
347
  voice_name=voice_name,
367
348
  output_file=output_file
368
349
  )
369
- # get_message expects dict
370
- return self.get_message(response_data)
350
+ if raw:
351
+ return response_data
352
+ else:
353
+ return self.get_message(response_data)
371
354
 
372
355
  def get_message(self, response: dict) -> str:
373
356
  """Retrieves message only from response"""
@@ -411,19 +394,10 @@ if __name__ == '__main__':
411
394
  try: # Add try-except block for testing
412
395
  ai = PiAI(timeout=60)
413
396
  print("[bold blue]Testing Chat (Stream):[/bold blue]")
414
- response = ai.chat(input(">>> "), stream=True)
397
+ response = ai.chat("hi", stream=True, raw=False)
415
398
  full_response = ""
416
399
  for chunk in response:
417
400
  print(chunk, end="", flush=True)
418
- full_response += chunk
419
- print("\n[bold green]Stream Test Complete.[/bold green]")
420
-
421
- # Optional: Test non-stream
422
- # print("\n[bold blue]Testing Chat (Non-Stream):[/bold blue]")
423
- # response_non_stream = ai.chat("Hello again", stream=False)
424
- # print(response_non_stream)
425
- # print("[bold green]Non-Stream Test Complete.[/bold green]")
426
-
427
401
  except exceptions.FailedToGenerateResponseError as e:
428
402
  print(f"\n[bold red]API Error:[/bold red] {e}")
429
403
  except Exception as e: