webscout 5.9.tar.gz → 6.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (126)
  1. {webscout-5.9/webscout.egg-info → webscout-6.0}/PKG-INFO +3 -31
  2. {webscout-5.9 → webscout-6.0}/README.md +2 -27
  3. {webscout-5.9 → webscout-6.0}/setup.py +0 -3
  4. {webscout-5.9 → webscout-6.0}/webscout/Provider/Amigo.py +4 -2
  5. webscout-6.0/webscout/Provider/ChatHub.py +209 -0
  6. {webscout-5.9 → webscout-6.0}/webscout/Provider/Chatify.py +3 -3
  7. {webscout-5.9 → webscout-6.0}/webscout/Provider/Cloudflare.py +3 -3
  8. {webscout-5.9 → webscout-6.0}/webscout/Provider/DARKAI.py +1 -1
  9. webscout-6.0/webscout/Provider/Deepinfra.py +172 -0
  10. {webscout-5.9 → webscout-6.0}/webscout/Provider/Deepseek.py +4 -6
  11. {webscout-5.9 → webscout-6.0}/webscout/Provider/DiscordRocks.py +3 -3
  12. {webscout-5.9 → webscout-6.0}/webscout/Provider/Free2GPT.py +3 -3
  13. {webscout-5.9 → webscout-6.0}/webscout/Provider/OLLAMA.py +4 -4
  14. {webscout-5.9 → webscout-6.0}/webscout/Provider/RUBIKSAI.py +3 -3
  15. {webscout-5.9 → webscout-6.0}/webscout/Provider/Youchat.py +4 -5
  16. {webscout-5.9 → webscout-6.0}/webscout/Provider/__init__.py +5 -5
  17. {webscout-5.9 → webscout-6.0}/webscout/Provider/ai4chat.py +3 -2
  18. webscout-6.0/webscout/Provider/bagoodex.py +145 -0
  19. {webscout-5.9 → webscout-6.0}/webscout/Provider/bixin.py +3 -3
  20. {webscout-5.9 → webscout-6.0}/webscout/Provider/cleeai.py +3 -3
  21. {webscout-5.9 → webscout-6.0}/webscout/Provider/elmo.py +2 -5
  22. {webscout-5.9 → webscout-6.0}/webscout/Provider/julius.py +6 -40
  23. {webscout-5.9 → webscout-6.0}/webscout/Provider/llamatutor.py +2 -2
  24. {webscout-5.9 → webscout-6.0}/webscout/Provider/prefind.py +3 -3
  25. {webscout-5.9 → webscout-6.0}/webscout/Provider/promptrefine.py +3 -3
  26. {webscout-5.9 → webscout-6.0}/webscout/Provider/turboseek.py +1 -1
  27. {webscout-5.9 → webscout-6.0}/webscout/Provider/twitterclone.py +25 -41
  28. {webscout-5.9 → webscout-6.0}/webscout/Provider/upstage.py +3 -3
  29. {webscout-5.9 → webscout-6.0}/webscout/Provider/x0gpt.py +6 -6
  30. {webscout-5.9 → webscout-6.0}/webscout/version.py +1 -1
  31. {webscout-5.9 → webscout-6.0/webscout.egg-info}/PKG-INFO +3 -31
  32. {webscout-5.9 → webscout-6.0}/webscout.egg-info/SOURCES.txt +2 -1
  33. {webscout-5.9 → webscout-6.0}/webscout.egg-info/requires.txt +0 -3
  34. webscout-5.9/webscout/Provider/Deepinfra.py +0 -466
  35. webscout-5.9/webscout/Provider/Poe.py +0 -208
  36. {webscout-5.9 → webscout-6.0}/LICENSE.md +0 -0
  37. {webscout-5.9 → webscout-6.0}/setup.cfg +0 -0
  38. {webscout-5.9 → webscout-6.0}/webscout/AIauto.py +0 -0
  39. {webscout-5.9 → webscout-6.0}/webscout/AIbase.py +0 -0
  40. {webscout-5.9 → webscout-6.0}/webscout/AIutel.py +0 -0
  41. {webscout-5.9 → webscout-6.0}/webscout/Agents/Onlinesearcher.py +0 -0
  42. {webscout-5.9 → webscout-6.0}/webscout/Agents/__init__.py +0 -0
  43. {webscout-5.9 → webscout-6.0}/webscout/Agents/functioncall.py +0 -0
  44. {webscout-5.9 → webscout-6.0}/webscout/Bard.py +0 -0
  45. {webscout-5.9 → webscout-6.0}/webscout/Bing_search.py +0 -0
  46. {webscout-5.9 → webscout-6.0}/webscout/DWEBS.py +0 -0
  47. {webscout-5.9 → webscout-6.0}/webscout/Extra/__init__.py +0 -0
  48. {webscout-5.9 → webscout-6.0}/webscout/Extra/autollama.py +0 -0
  49. {webscout-5.9 → webscout-6.0}/webscout/Extra/gguf.py +0 -0
  50. {webscout-5.9 → webscout-6.0}/webscout/Extra/weather.py +0 -0
  51. {webscout-5.9 → webscout-6.0}/webscout/Extra/weather_ascii.py +0 -0
  52. {webscout-5.9 → webscout-6.0}/webscout/LLM.py +0 -0
  53. {webscout-5.9 → webscout-6.0}/webscout/Local/__init__.py +0 -0
  54. {webscout-5.9 → webscout-6.0}/webscout/Local/_version.py +0 -0
  55. {webscout-5.9 → webscout-6.0}/webscout/Local/formats.py +0 -0
  56. {webscout-5.9 → webscout-6.0}/webscout/Local/model.py +0 -0
  57. {webscout-5.9 → webscout-6.0}/webscout/Local/rawdog.py +0 -0
  58. {webscout-5.9 → webscout-6.0}/webscout/Local/samplers.py +0 -0
  59. {webscout-5.9 → webscout-6.0}/webscout/Local/thread.py +0 -0
  60. {webscout-5.9 → webscout-6.0}/webscout/Local/utils.py +0 -0
  61. {webscout-5.9 → webscout-6.0}/webscout/Provider/AI21.py +0 -0
  62. {webscout-5.9 → webscout-6.0}/webscout/Provider/Andi.py +0 -0
  63. {webscout-5.9 → webscout-6.0}/webscout/Provider/BasedGPT.py +0 -0
  64. {webscout-5.9 → webscout-6.0}/webscout/Provider/Bing.py +0 -0
  65. {webscout-5.9 → webscout-6.0}/webscout/Provider/Blackboxai.py +0 -0
  66. {webscout-5.9 → webscout-6.0}/webscout/Provider/ChatGPTES.py +0 -0
  67. {webscout-5.9 → webscout-6.0}/webscout/Provider/Cohere.py +0 -0
  68. {webscout-5.9 → webscout-6.0}/webscout/Provider/EDITEE.py +0 -0
  69. {webscout-5.9 → webscout-6.0}/webscout/Provider/Farfalle.py +0 -0
  70. {webscout-5.9 → webscout-6.0}/webscout/Provider/GPTWeb.py +0 -0
  71. {webscout-5.9 → webscout-6.0}/webscout/Provider/Gemini.py +0 -0
  72. {webscout-5.9 → webscout-6.0}/webscout/Provider/Groq.py +0 -0
  73. {webscout-5.9 → webscout-6.0}/webscout/Provider/Koboldai.py +0 -0
  74. {webscout-5.9 → webscout-6.0}/webscout/Provider/Llama.py +0 -0
  75. {webscout-5.9 → webscout-6.0}/webscout/Provider/Llama3.py +0 -0
  76. {webscout-5.9 → webscout-6.0}/webscout/Provider/Openai.py +0 -0
  77. {webscout-5.9 → webscout-6.0}/webscout/Provider/PI.py +0 -0
  78. {webscout-5.9 → webscout-6.0}/webscout/Provider/Perplexity.py +0 -0
  79. {webscout-5.9 → webscout-6.0}/webscout/Provider/Phind.py +0 -0
  80. {webscout-5.9 → webscout-6.0}/webscout/Provider/PizzaGPT.py +0 -0
  81. {webscout-5.9 → webscout-6.0}/webscout/Provider/Reka.py +0 -0
  82. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/Nexra.py +0 -0
  83. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/PollinationsAI.py +0 -0
  84. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/WebSimAI.py +0 -0
  85. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/__init__.py +0 -0
  86. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/aiforce.py +0 -0
  87. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/amigo.py +0 -0
  88. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/artbit.py +0 -0
  89. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/blackboximage.py +0 -0
  90. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/deepinfra.py +0 -0
  91. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTI/huggingface.py +0 -0
  92. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTS/__init__.py +0 -0
  93. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTS/parler.py +0 -0
  94. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTS/streamElements.py +0 -0
  95. {webscout-5.9 → webscout-6.0}/webscout/Provider/TTS/voicepod.py +0 -0
  96. {webscout-5.9 → webscout-6.0}/webscout/Provider/TeachAnything.py +0 -0
  97. {webscout-5.9 → webscout-6.0}/webscout/Provider/aigames.py +0 -0
  98. {webscout-5.9 → webscout-6.0}/webscout/Provider/cerebras.py +0 -0
  99. {webscout-5.9 → webscout-6.0}/webscout/Provider/felo_search.py +0 -0
  100. {webscout-5.9 → webscout-6.0}/webscout/Provider/geminiapi.py +0 -0
  101. {webscout-5.9 → webscout-6.0}/webscout/Provider/genspark.py +0 -0
  102. {webscout-5.9 → webscout-6.0}/webscout/Provider/koala.py +0 -0
  103. {webscout-5.9 → webscout-6.0}/webscout/Provider/learnfastai.py +0 -0
  104. {webscout-5.9 → webscout-6.0}/webscout/Provider/lepton.py +0 -0
  105. {webscout-5.9 → webscout-6.0}/webscout/Provider/meta.py +0 -0
  106. {webscout-5.9 → webscout-6.0}/webscout/Provider/tutorai.py +0 -0
  107. {webscout-5.9 → webscout-6.0}/webscout/Provider/xdash.py +0 -0
  108. {webscout-5.9 → webscout-6.0}/webscout/Provider/yep.py +0 -0
  109. {webscout-5.9 → webscout-6.0}/webscout/YTdownloader.py +0 -0
  110. {webscout-5.9 → webscout-6.0}/webscout/__init__.py +0 -0
  111. {webscout-5.9 → webscout-6.0}/webscout/__main__.py +0 -0
  112. {webscout-5.9 → webscout-6.0}/webscout/cli.py +0 -0
  113. {webscout-5.9 → webscout-6.0}/webscout/exceptions.py +0 -0
  114. {webscout-5.9 → webscout-6.0}/webscout/g4f.py +0 -0
  115. {webscout-5.9 → webscout-6.0}/webscout/models.py +0 -0
  116. {webscout-5.9 → webscout-6.0}/webscout/requestsHTMLfix.py +0 -0
  117. {webscout-5.9 → webscout-6.0}/webscout/tempid.py +0 -0
  118. {webscout-5.9 → webscout-6.0}/webscout/transcriber.py +0 -0
  119. {webscout-5.9 → webscout-6.0}/webscout/utils.py +0 -0
  120. {webscout-5.9 → webscout-6.0}/webscout/webai.py +0 -0
  121. {webscout-5.9 → webscout-6.0}/webscout/webscout_search.py +0 -0
  122. {webscout-5.9 → webscout-6.0}/webscout/webscout_search_async.py +0 -0
  123. {webscout-5.9 → webscout-6.0}/webscout/websx_search.py +0 -0
  124. {webscout-5.9 → webscout-6.0}/webscout.egg-info/dependency_links.txt +0 -0
  125. {webscout-5.9 → webscout-6.0}/webscout.egg-info/entry_points.txt +0 -0
  126. {webscout-5.9 → webscout-6.0}/webscout.egg-info/top_level.txt +0 -0

{webscout-5.9/webscout.egg-info → webscout-6.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 5.9
+Version: 6.0
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -33,7 +33,6 @@ Requires-Dist: g4f[webdriver]
 Requires-Dist: rich
 Requires-Dist: beautifulsoup4
 Requires-Dist: markdownify
-Requires-Dist: pydantic
 Requires-Dist: requests
 Requires-Dist: google-generativeai
 Requires-Dist: lxml>=5.2.2
@@ -44,8 +43,6 @@ Requires-Dist: appdirs
 Requires-Dist: tls_client
 Requires-Dist: clipman
 Requires-Dist: playsound
-Requires-Dist: poe_api_wrapper
-Requires-Dist: pyreqwest_impersonate
 Requires-Dist: ollama
 Requires-Dist: pyfiglet
 Requires-Dist: pillow
@@ -108,7 +105,7 @@ Requires-Dist: huggingface_hub[cli]; extra == "local"
 * **Text-to-Speech (TTS):** Convert text into natural-sounding speech using various TTS providers.
 * **WebAI:** Experience the power of terminal-based GPT and an open interpreter for code execution and more.
 * **Offline LLMs:** Utilize powerful language models offline with GGUF support.
-* **Extensive Provider Ecosystem:** Explore a vast collection of providers, including Poe, BasedGPT, DeepSeek, and many others.
+* **Extensive Provider Ecosystem:** Explore a vast collection of providers, including BasedGPT, DeepSeek, and many others.
 * **Local LLM Execution:** Run GGUF models locally with minimal configuration.
 * **Rawdog Scripting:** Execute Python scripts directly within your terminal using the `rawdog` feature.
 * **GGUF Conversion & Quantization:** Convert and quantize Hugging Face models to GGUF format.
@@ -1128,10 +1125,6 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-### `Poe` - Chat with Poe
-
-Usage code is similar to other providers.
-
 ### `BasedGPT` - Chat with GPT
 
 ```python
@@ -1210,27 +1203,6 @@ message = ai.get_message(response)
 print(message)
 ```
 
-### `Deepinfra` - VLM
-
-```python
-from webscout.Provider import VLM
-
-# Load your image
-image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"
-
-vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
-image_base64 = vlm_instance.encode_image_to_base64(image_path)
-
-prompt = {
-    "content": "What is in this image?",
-    "image": image_base64
-}
-
-# Generate a response
-response = vlm_instance.chat(prompt)
-print(response)
-
-```
 
 ### `GROQ`
 
@@ -1454,7 +1426,7 @@ else:
 print(f"Error: {function_call_data['error']}")
 ```
 
-### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, AIGameIO, LlamaTutor, PromptRefine, AIUncensored, TutorAI, Bixin, ChatGPTES
+### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, AIGameIO, LlamaTutor, PromptRefine, AIUncensored, TutorAI, Bixin, ChatGPTES, Bagoodex, ChatHub, AmigoChat
 
 Code is similar to other providers.
 

{webscout-5.9 → webscout-6.0}/README.md

@@ -41,7 +41,7 @@
 * **Text-to-Speech (TTS):** Convert text into natural-sounding speech using various TTS providers.
 * **WebAI:** Experience the power of terminal-based GPT and an open interpreter for code execution and more.
 * **Offline LLMs:** Utilize powerful language models offline with GGUF support.
-* **Extensive Provider Ecosystem:** Explore a vast collection of providers, including Poe, BasedGPT, DeepSeek, and many others.
+* **Extensive Provider Ecosystem:** Explore a vast collection of providers, including BasedGPT, DeepSeek, and many others.
 * **Local LLM Execution:** Run GGUF models locally with minimal configuration.
 * **Rawdog Scripting:** Execute Python scripts directly within your terminal using the `rawdog` feature.
 * **GGUF Conversion & Quantization:** Convert and quantize Hugging Face models to GGUF format.
@@ -1061,10 +1061,6 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-### `Poe` - Chat with Poe
-
-Usage code is similar to other providers.
-
 ### `BasedGPT` - Chat with GPT
 
 ```python
@@ -1143,27 +1139,6 @@ message = ai.get_message(response)
 print(message)
 ```
 
-### `Deepinfra` - VLM
-
-```python
-from webscout.Provider import VLM
-
-# Load your image
-image_path = r"C:\Users\koula\OneDrive\Desktop\Webscout\photo_2024-03-25_19-23-40.jpg"
-
-vlm_instance = VLM(model="llava-hf/llava-1.5-7b-hf", is_conversation=True, max_tokens=600, timeout=30, system_prompt="You are a Helpful AI.")
-image_base64 = vlm_instance.encode_image_to_base64(image_path)
-
-prompt = {
-    "content": "What is in this image?",
-    "image": image_base64
-}
-
-# Generate a response
-response = vlm_instance.chat(prompt)
-print(response)
-
-```
 
 ### `GROQ`
 
@@ -1387,7 +1362,7 @@ else:
 print(f"Error: {function_call_data['error']}")
 ```
 
-### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, AIGameIO, LlamaTutor, PromptRefine, AIUncensored, TutorAI, Bixin, ChatGPTES
+### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, AIGameIO, LlamaTutor, PromptRefine, AIUncensored, TutorAI, Bixin, ChatGPTES, Bagoodex, ChatHub, AmigoChat
 
 Code is similar to other providers.
 

{webscout-5.9 → webscout-6.0}/setup.py

@@ -37,7 +37,6 @@ setup(
         "rich",
         "beautifulsoup4",
         "markdownify",
-        "pydantic",
         "requests",
         "google-generativeai",
         "lxml>=5.2.2",
@@ -48,8 +47,6 @@ setup(
         "tls_client",
         "clipman",
         "playsound",
-        "poe_api_wrapper",
-        "pyreqwest_impersonate",
         "ollama",
         "pyfiglet",
         "pillow",

{webscout-5.9 → webscout-6.0}/webscout/Provider/Amigo.py

@@ -36,6 +36,7 @@ class AmigoChat(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "o1-preview",  # Default model
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """
         Initializes the AmigoChat.io API with given parameters.
@@ -108,6 +109,7 @@ class AmigoChat(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
+        self.system_prompt = system_prompt
 
     def ask(
         self,
@@ -147,7 +149,7 @@
         # Define the payload
         payload = {
             "messages": [
-                {"role": "system", "content": "Mai hu ba khabr"},
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt}
             ],
             "model": self.model,
@@ -259,7 +261,7 @@
 
 if __name__ == '__main__':
     from rich import print
-    ai = AmigoChat(model="o1-preview")
+    ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
     response = ai.chat(input(">>> "))
     for chunk in response:
         print(chunk, end="", flush=True)
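
Taken together, the Amigo.py hunks replace the hard-coded system message ("Mai hu ba khabr", roughly Hindi/Urdu for "I am well-informed") with a caller-supplied `system_prompt` that defaults to a neutral assistant persona. A minimal usage sketch, assuming `AmigoChat` stays importable from `webscout.Provider` as in 5.9:

```python
# Sketch: the new system_prompt parameter (6.0) steers the system
# message sent with every request; model and chat() usage follow the
# provider's own __main__ block above.
from webscout.Provider import AmigoChat

ai = AmigoChat(
    model="o1-preview",
    system_prompt="You are a terse assistant; answer in one sentence.",
)
for chunk in ai.chat("What does webscout do?"):
    print(chunk, end="", flush=True)
```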

webscout-6.0/webscout/Provider/ChatHub.py (new file)

@@ -0,0 +1,209 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class ChatHub(Provider):
+    """
+    A class to interact with the ChatHub API.
+    """
+
+    AVAILABLE_MODELS = [
+        'meta/llama3.1-8b',
+        'mistral/mixtral-8x7b',
+        'google/gemma-2',
+        'perplexity/sonar-online',
+    ]
+    model_aliases = {  # Aliases for shorter model names
+        "llama3.1-8b": 'meta/llama3.1-8b',
+        "mixtral-8x7b": 'mistral/mixtral-8x7b',
+        "gemma-2": 'google/gemma-2',
+        "sonar-online": 'perplexity/sonar-online',
+    }
+
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "sonar-online",
+    ):
+        """Initializes the ChatHub API client."""
+        self.url = "https://app.chathub.gg"
+        self.api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Content-Type': 'application/json',
+            'Origin': self.url,
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+            'X-App-Id': 'web'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Resolve the model
+        self.model = self.get_model(model)
+
+
+    def get_model(self, model: str) -> str:
+        """
+        Resolves the model name using aliases or defaults.
+        """
+
+        if model in self.AVAILABLE_MODELS:
+            return model
+        elif model in self.model_aliases:
+            return self.model_aliases[model]
+        else:
+            print(f"Model '{model}' not found. Using default model '{self.default_model}'.")
+            return self.default_model  # Use class-level default
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+        data = {
+            "model": self.model,
+            "messages": [{"role": "user", "content": conversation_prompt}],
+            "tools": []
+        }
+
+        # Set the Referer header dynamically based on the resolved model
+        self.headers['Referer'] = f"{self.url}/chat/{self.model}"
+
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            decoded_line = line.strip()
+                            if decoded_line.startswith('data:'):
+                                data_str = decoded_line[5:].strip()
+                                if data_str == '[DONE]':
+                                    break
+                                try:
+                                    data_json = json.loads(data_str)
+                                    text_delta = data_json.get('textDelta')
+                                    if text_delta:
+                                        streaming_text += text_delta
+                                        resp = dict(text=text_delta)
+                                        yield resp if raw else resp
+
+                                except json.JSONDecodeError:
+                                    continue
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request error: {e}")
+
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Generate response `str`"""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,  # Pass stream=False
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = ChatHub()
+    try:
+        response = bot.chat("who is Abhay koul in AI", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")

{webscout-5.9 → webscout-6.0}/webscout/Provider/Chatify.py

@@ -115,7 +115,7 @@ class Chatify(Provider):
                     if len(parts) > 1:
                         content = parts[1].strip().strip('"')
                         streaming_text += content
-                        yield content if raw else dict(text=streaming_text)
+                        yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
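
This one-line change is the recurring fix of this release, repeated in Cloudflare, DARKAI, DeepSeek, and DiscordRocks below: in non-raw streaming mode, each yielded dict now carries only the new fragment (`dict(text=content)`) instead of the whole accumulated reply, so consumers no longer print duplicated prefixes. A sketch of the resulting caller-side pattern, assuming Chatify's `ask` follows the common Provider signature shown in the new files in this diff:

```python
# Sketch: under 6.0 each streamed chunk is a delta, so the caller
# rebuilds the full reply by concatenation.
from webscout.Provider import Chatify

ai = Chatify()
full_text = ""
for chunk in ai.ask("hi", stream=True):
    full_text += chunk["text"]  # 6.0: delta only
    print(chunk["text"], end="", flush=True)
print("\n--- full reply length:", len(full_text))
```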
@@ -169,7 +169,7 @@ class Chatify(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Chatify()
-    response = ai.chat("hi")
+    ai = Chatify(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)

{webscout-5.9 → webscout-6.0}/webscout/Provider/Cloudflare.py

@@ -194,7 +194,7 @@ class Cloudflare(Provider):
                     data = json.loads(line[6:])
                     content = data.get('response', '')
                     streaming_response += content
-                    yield content if raw else dict(text=streaming_response)
+                    yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -255,7 +255,7 @@ class Cloudflare(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = Cloudflare()
-    response = ai.chat("hi")
+    ai = Cloudflare(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)

{webscout-5.9 → webscout-6.0}/webscout/Provider/DARKAI.py

@@ -156,7 +156,7 @@ class DARKAI(Provider):
                         if event.get("event") == "final-response":
                             message = event['data'].get('message', '')
                             streaming_response += message
-                            yield message if raw else dict(text=streaming_response)
+                            yield message if raw else dict(text=message)
                     except json.decoder.JSONDecodeError:
                         continue
             self.last_response.update(dict(text=streaming_response))

webscout-6.0/webscout/Provider/Deepinfra.py (new file)

@@ -0,0 +1,172 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class DeepInfra(Provider):
+    """
+    A class to interact with the DeepInfra API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,  # Set a reasonable default
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "Qwen/Qwen2.5-72B-Instruct",
+    ):
+        """Initializes the DeepInfra API client."""
+        self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.headers = {
+            "Accept": "text/event-stream, application/json",
+
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Payload construction
+        payload = {
+            "model": self.model,
+            "messages": [
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "stream": stream
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):  # Decode lines
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+
+                                            # Yield ONLY the new content:
+                                            resp = dict(text=content)
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    pass  # Or handle the error as needed
+                    self.conversation.update_chat_history(prompt, streaming_text)  # Update history *after* streaming
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = DeepInfra(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
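
The 6.0 Deepinfra.py is a from-scratch OpenAI-style SSE client replacing the 466-line 5.9 file (which also carried the VLM support removed from the README above). One caveat visible in the code: `for_stream` never updates `self.last_response`, so the non-stream path returns an empty dict and `get_message` would raise `KeyError`; streaming works as shown in its `__main__` block. A minimal streaming sketch, with the `webscout.Provider` re-export assumed:

```python
# Sketch: stream from the rewritten DeepInfra provider; the model
# defaults to Qwen/Qwen2.5-72B-Instruct per the diff above.
from webscout.Provider import DeepInfra  # assumed export name

ai = DeepInfra(timeout=60)
for chunk in ai.chat("Summarize what changed in webscout 6.0", stream=True):
    print(chunk, end="", flush=True)
```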

{webscout-5.9 → webscout-6.0}/webscout/Provider/Deepseek.py

@@ -151,16 +151,14 @@ class DeepSeek(Provider):
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
             streaming_response = ""
-            collected_messages = []
             for line in response.iter_lines():
                 if line:
                     json_line = json.loads(line.decode('utf-8').split('data: ')[1])
                     if 'choices' in json_line and len(json_line['choices']) > 0:
                         delta_content = json_line['choices'][0].get('delta', {}).get('content')
                         if delta_content:
-                            collected_messages.append(delta_content)
-                            streaming_response = ''.join(collected_messages)
-                            yield delta_content if raw else dict(text=streaming_response)
+                            streaming_response += delta_content
+                            yield delta_content if raw else dict(text=delta_content)
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -222,7 +220,7 @@
 
 if __name__ == '__main__':
     from rich import print
-    ai = DeepSeek(api_key="")
-    response = ai.chat("tell me about india")
+    ai = DeepSeek(api_key="", timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)

{webscout-5.9 → webscout-6.0}/webscout/Provider/DiscordRocks.py

@@ -167,7 +167,7 @@ class DiscordRocks(Provider):
                             content = json_data['choices'][0]['delta'].get('content', '')
                             if content:
                                 full_content += content
-                                yield content if raw else dict(text=full_content)
+                                yield content if raw else dict(text=content)
                     except json.JSONDecodeError:
                         print(f'Error decoding JSON: {decoded_line}')
                     except KeyError:
@@ -247,7 +247,7 @@ class DiscordRocks(Provider):
 
 if __name__ == '__main__':
     from rich import print
-    ai = DiscordRocks()
-    response = ai.chat(input(">>> "))
+    ai = DiscordRocks(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)