webscout-4.6-py3-none-any.whl → webscout-4.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (44)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Local/_version.py +1 -1
  4. webscout/Provider/Andi.py +7 -1
  5. webscout/Provider/BasedGPT.py +11 -5
  6. webscout/Provider/Berlin4h.py +11 -5
  7. webscout/Provider/Blackboxai.py +10 -4
  8. webscout/Provider/Cohere.py +11 -5
  9. webscout/Provider/DARKAI.py +25 -7
  10. webscout/Provider/Deepinfra.py +2 -1
  11. webscout/Provider/Deepseek.py +25 -9
  12. webscout/Provider/DiscordRocks.py +389 -0
  13. webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
  14. webscout/Provider/Gemini.py +1 -1
  15. webscout/Provider/Groq.py +244 -110
  16. webscout/Provider/Llama.py +13 -5
  17. webscout/Provider/Llama3.py +15 -2
  18. webscout/Provider/OLLAMA.py +8 -7
  19. webscout/Provider/Perplexity.py +422 -52
  20. webscout/Provider/Phind.py +6 -5
  21. webscout/Provider/PizzaGPT.py +7 -1
  22. webscout/Provider/__init__.py +15 -31
  23. webscout/Provider/ai4chat.py +193 -0
  24. webscout/Provider/koala.py +11 -5
  25. webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
  26. webscout/Provider/meta.py +779 -0
  27. webscout/exceptions.py +6 -0
  28. webscout/version.py +1 -1
  29. webscout/webai.py +2 -64
  30. webscout/webscout_search.py +1 -1
  31. {webscout-4.6.dist-info → webscout-4.8.dist-info}/METADATA +254 -297
  32. {webscout-4.6.dist-info → webscout-4.8.dist-info}/RECORD +36 -40
  33. webscout/Provider/FreeGemini.py +0 -169
  34. webscout/Provider/Geminiflash.py +0 -152
  35. webscout/Provider/Geminipro.py +0 -152
  36. webscout/Provider/Leo.py +0 -469
  37. webscout/Provider/OpenGPT.py +0 -867
  38. webscout/Provider/Xjai.py +0 -230
  39. webscout/Provider/Yepchat.py +0 -478
  40. webscout/Provider/Youchat.py +0 -225
  41. {webscout-4.6.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
  42. {webscout-4.6.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
  43. {webscout-4.6.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
  44. {webscout-4.6.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/exceptions.py CHANGED
@@ -14,5 +14,11 @@ class FailedToGenerateResponseError(Exception):
     """Provider failed to fetch response"""
 class AllProvidersFailure(Exception):
     """None of the providers generated response successfully"""
+    pass
 
+
+class FacebookInvalidCredentialsException(Exception):
+    pass
+
+
+class FacebookRegionBlocked(Exception):
     pass
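
The two Facebook exceptions added here back the new Meta provider introduced in this release (webscout/Provider/meta.py). A minimal sketch of how a caller might handle them, assuming they are raised during login and region checks as their names suggest; safe_meta_login and its argument are illustrative:

# Hedged sketch: handling the exceptions added in webscout 4.8.
# The exact raise sites inside webscout/Provider/meta.py are assumed, not shown in this diff.
from webscout.exceptions import (
    FacebookInvalidCredentialsException,
    FacebookRegionBlocked,
)

def safe_meta_login(login_callable):
    try:
        return login_callable()
    except FacebookInvalidCredentialsException:
        print("Facebook rejected the credentials; check the email/password pair.")
    except FacebookRegionBlocked:
        print("Facebook blocks this region; a proxy or VPN may be needed.")
    return None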
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "4.6"
+__version__ = "4.8"
 __prog__ = "webscout"
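
Because 4.8 also renames and removes provider classes (see the webai.py hunks and the file list in this diff), downstream code can branch on the version string bumped above. A minimal sketch; the PerplexityProvider alias is illustrative, and both import names are taken from the webai.py hunk further down:

# Hedged sketch: branching on the version bumped in this release.
from webscout.version import __version__

major, minor = (int(part) for part in __version__.split(".")[:2])
if (major, minor) >= (4, 8):
    from webscout import Perplexity as PerplexityProvider  # 4.8 class name
else:
    from webscout import PERPLEXITY as PerplexityProvider  # 4.6 class name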
webscout/webai.py CHANGED
@@ -432,26 +432,6 @@ class Main(cmd.Cmd):
                 intro=intro,
                 act=awesome_prompt,
             )
-        elif provider == "leo":
-            from webscout import LEO
-
-            self.bot = LEO(
-                is_conversation=disable_conversation,
-                max_tokens=max_tokens,
-                temperature=temperature,
-                top_k=top_k,
-                top_p=top_p,
-                model=getOr(model, "llama-2-13b-chat"),
-                brave_key=getOr(auth, "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"),
-                timeout=timeout,
-                intro=intro,
-                filepath=filepath,
-                update_file=update_file,
-                proxies=proxies,
-                history_offset=history_offset,
-                act=awesome_prompt,
-            )
-
         elif provider == "openai":
             assert auth, (
                 "OpenAI's API-key is required. " "Use the flag `--key` or `-k`"
@@ -546,20 +526,6 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
-        elif provider == "chatgptuk":
-            from webscout import ChatGPTUK
-
-            self.bot = ChatGPTUK(
-                is_conversation=disable_conversation,
-                max_tokens=max_tokens,
-                timeout=timeout,
-                intro=intro,
-                filepath=filepath,
-                update_file=update_file,
-                proxies=proxies,
-                history_offset=history_offset,
-                act=awesome_prompt,
-            )
         elif provider == "yepchat":
             from webscout import YEPCHAT
 
@@ -705,34 +671,6 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
-        elif provider == "geminiflash":
-            from webscout import GEMINIFLASH
-
-            self.bot = GEMINIFLASH(
-                is_conversation=disable_conversation,
-                max_tokens=max_tokens,
-                timeout=timeout,
-                intro=intro,
-                filepath=filepath,
-                update_file=update_file,
-                proxies=proxies,
-                history_offset=history_offset,
-                act=awesome_prompt,
-            )
-        elif provider == "geminipro":
-            from webscout import GEMINIPRO
-
-            self.bot = GEMINIPRO(
-                is_conversation=disable_conversation,
-                max_tokens=max_tokens,
-                timeout=timeout,
-                intro=intro,
-                filepath=filepath,
-                update_file=update_file,
-                proxies=proxies,
-                history_offset=history_offset,
-                act=awesome_prompt,
-            )
 
         elif provider == "vtlchat":
             from webscout import VTLchat
@@ -846,9 +784,9 @@ class Main(cmd.Cmd):
 
 
         elif provider == "perplexity":
-            from webscout import PERPLEXITY
+            from webscout import Perplexity
 
-            self.bot = PERPLEXITY(
+            self.bot = Perplexity(
                 is_conversation=disable_conversation,
                 max_tokens=max_tokens,
                 timeout=timeout,
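
The CLI now imports Perplexity instead of PERPLEXITY, so anyone constructing the provider directly has to follow the rename as well. A minimal sketch using only the keyword arguments visible in this hunk; the literal values are illustrative and other Perplexity parameters are not shown here:

# Hedged sketch: the PERPLEXITY -> Perplexity rename in 4.8.
from webscout import Perplexity  # was `from webscout import PERPLEXITY` in 4.6

bot = Perplexity(
    is_conversation=True,  # the CLI feeds its `disable_conversation` flag here
    max_tokens=600,        # illustrative value
    timeout=30,            # illustrative value
)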
webscout/webscout_search.py CHANGED
@@ -140,7 +140,7 @@ class WEBS:
         models = {
             "claude-3-haiku": "claude-3-haiku-20240307",
             "gpt-3.5": "gpt-3.5-turbo-0125",
-            "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+            "llama-3-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
             "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
             "gpt-4o-mini": "gpt-4o-mini",
         }
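
With this change the "llama-3-70b" alias in WEBS resolves to meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo instead of the old Llama-3-70b chat model. A minimal sketch of using the alias, assuming WEBS.chat() keeps its 4.6-style model parameter; the prompt is illustrative:

# Hedged sketch: the "llama-3-70b" alias now targets the Llama 3.1 Turbo backend
# via the mapping shown above. Assumes WEBS.chat(keywords, model=...) as in 4.6.
from webscout import WEBS

webs = WEBS()
reply = webs.chat("Summarize what changed in webscout 4.8.", model="llama-3-70b")
print(reply)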