webscout 5.9__py3-none-any.whl → 6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (57):
  1. webscout/Agents/Onlinesearcher.py +22 -10
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Bard.py +21 -21
  4. webscout/Local/__init__.py +6 -7
  5. webscout/Local/formats.py +404 -194
  6. webscout/Local/model.py +1074 -477
  7. webscout/Local/samplers.py +108 -144
  8. webscout/Local/thread.py +251 -410
  9. webscout/Local/ui.py +401 -0
  10. webscout/Local/utils.py +308 -131
  11. webscout/Provider/Amigo.py +5 -3
  12. webscout/Provider/ChatHub.py +209 -0
  13. webscout/Provider/Chatify.py +3 -3
  14. webscout/Provider/Cloudflare.py +3 -3
  15. webscout/Provider/DARKAI.py +1 -1
  16. webscout/Provider/Deepinfra.py +95 -389
  17. webscout/Provider/Deepseek.py +4 -6
  18. webscout/Provider/DiscordRocks.py +3 -3
  19. webscout/Provider/Free2GPT.py +3 -3
  20. webscout/Provider/NinjaChat.py +200 -0
  21. webscout/Provider/OLLAMA.py +4 -4
  22. webscout/Provider/RUBIKSAI.py +3 -3
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +2 -1
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/Youchat.py +4 -5
  28. webscout/Provider/__init__.py +13 -6
  29. webscout/Provider/ai4chat.py +3 -2
  30. webscout/Provider/aimathgpt.py +193 -0
  31. webscout/Provider/bagoodex.py +145 -0
  32. webscout/Provider/bixin.py +3 -3
  33. webscout/Provider/cleeai.py +3 -3
  34. webscout/Provider/elmo.py +2 -5
  35. webscout/Provider/felo_search.py +1 -1
  36. webscout/Provider/gaurish.py +168 -0
  37. webscout/Provider/geminiprorealtime.py +160 -0
  38. webscout/Provider/julius.py +10 -40
  39. webscout/Provider/llamatutor.py +2 -2
  40. webscout/Provider/prefind.py +3 -3
  41. webscout/Provider/promptrefine.py +3 -3
  42. webscout/Provider/turboseek.py +1 -1
  43. webscout/Provider/twitterclone.py +25 -41
  44. webscout/Provider/upstage.py +3 -3
  45. webscout/Provider/x0gpt.py +6 -6
  46. webscout/exceptions.py +5 -1
  47. webscout/utils.py +3 -0
  48. webscout/version.py +1 -1
  49. webscout/webscout_search.py +154 -123
  50. {webscout-5.9.dist-info → webscout-6.1.dist-info}/METADATA +132 -157
  51. {webscout-5.9.dist-info → webscout-6.1.dist-info}/RECORD +55 -49
  52. {webscout-5.9.dist-info → webscout-6.1.dist-info}/WHEEL +1 -1
  53. webscout/Local/rawdog.py +0 -946
  54. webscout/Provider/Poe.py +0 -208
  55. {webscout-5.9.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
  56. {webscout-5.9.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
  57. {webscout-5.9.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,200 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import Any, Dict, Optional, Generator, List, Union
5
+
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
9
+ from webscout.AIbase import Provider, AsyncProvider
10
+ from webscout import exceptions
11
+
12
+
13
class NinjaChat(Provider):
    """Provider for the NinjaChat API.

    Streams chat completions from one of several backend models hosted at
    ninjachat.ai. Conversation history, prompt optimizers and the system
    message are handled through the shared webscout AIutel helpers.
    """

    # Maps the public model name to its NinjaChat endpoint URL.
    AVAILABLE_MODELS = {
        "mistral": "https://www.ninjachat.ai/api/mistral",
        "perplexity": "https://www.ninjachat.ai/api/perplexity",
        "claude-3.5": "https://www.ninjachat.ai/api/claude-pro",
        "gemini-1.5-pro": "https://www.ninjachat.ai/api/gemini",
        "llama": "https://www.ninjachat.ai/api/llama-pro",
        "o1-mini": "https://www.ninjachat.ai/api/o1-mini",
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: Optional[str] = None,  # System message/intro prompt
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,  # was a mutable default ({}); None avoids cross-instance sharing
        history_offset: int = 10250,
        act: Optional[str] = None,
        model: str = "perplexity",  # Default model
        system_message: str = "You are a helpful AI assistant.",  # Default system message
    ):
        """Initializes the NinjaChat API client.

        Args:
            is_conversation: Keep conversational history between calls.
            max_tokens: Upper bound passed to the Conversation helper.
            timeout: Per-request timeout in seconds.
            intro: Explicit intro prompt (overridden by ``act`` if given).
            filepath: Optional path for persisting conversation history.
            update_file: Whether to write history updates back to ``filepath``.
            proxies: Optional mapping of proxies for the HTTP session.
            history_offset: History truncation offset for the conversation.
            act: AwesomePrompts persona key; takes priority over ``intro``.
            model: One of ``AVAILABLE_MODELS``.
            system_message: System role content sent with every request.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        # Fail fast on an unknown model before doing any other setup.
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

        self.headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Content-Type": "application/json",
            "Cookie": "_ga=GA1.1.298084589.1727859540; _ga_11N4NZX9WP=GS1.1.1727859539.1.0.1727859552.0.0.0; __stripe_mid=4f63db68-c41d-45b4-9111-2457a6cf1b538696a9; __Host-next-auth.csrf-token=a5cb5a40c73df3e808ebc072dcb116fe7dd4b9b8d39d8002ef7e54153e6aa665%7Cbffe3f934f2db43330d281453af2cd0b4757f439b958f2d1a06a36cea63e9cc8; __stripe_sid=118678d1-403a-43f9-b3b9-d80ed9392a0d2ac131; __Secure-next-auth.callback-url=https%3A%2F%2Fwww.ninjachat.ai%2Fdashboard; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..l34CIFGTJCtstUqU.VjEYgaUUPpgp-49wueXFlFYvbm8csuyX0HichHrPNH45nX4s_LeZX2VhK1ZvwmUpfdlsMD4bi8VzFfQUEgs8FLPhkbKnoZDP939vobV7K_2Q9CA8PgC0oXEsQf_azWmILZ8rOE37uYzTu1evCnOjCucDYrC1ONXzl9NbGNPVa8AQr7hXvatuqtqe-lBUQXWdrw3QLulbqxvh6yLoxJj04gqC-nPudGciU-_-3TZJYr98u8o7KtUGio1ZX9vHNFfv8djWM1NCkji3Kl9eUhiyMj71.6uhUS39UcCVRa6tFzHxz2g; ph_phc_wWUtqcGWqyyochfPvwKlXMkMjIoIQKUwcnHE3KMKm8K_posthog=%7B%22distinct_id%22%3A%2201924c74-2926-7042-a1fb-5b5debdbcd1c%22%2C%22%24sesid%22%3A%5B1727966419499%2C%22019252bb-9de4-75db-9f85-a389fb401670%22%2C1727964880355%5D%7D",
            "DNT": "1",
            "Origin": "https://www.ninjachat.ai",
            "Priority": "u=1, i",
            "Referer": "https://www.ninjachat.ai/dashboard",
            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
            )
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies or {})
        self.timeout = timeout
        self.last_response = {}
        self.system_message = system_message

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        # NOTE: this was previously a generator expression, which was exhausted
        # after the first membership test in ask(); a tuple is reusable.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Set the intro/system message.
        # Priority: act > intro > system_message > Conversation.intro
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or system_message or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        self.model_url = self.AVAILABLE_MODELS[model]
        self.headers["Referer"] = self.model_url  # Set initial referer

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
        """Send ``prompt`` to the API and return the response.

        Args:
            prompt: User message.
            stream: Yield chunks as they arrive instead of a final dict.
            raw: When streaming, yield bare text chunks instead of dicts.
            optimizer: Name of an Optimizers method to transform the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare user prompt.

        Returns:
            A generator of chunks when ``stream`` is True, otherwise the
            final ``{"text": ...}`` dict.

        Raises:
            Exception: If ``optimizer`` is not a known optimizer name.
            exceptions.FailedToGenerateResponseError: On network failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt, intro=Conversation.intro)

        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Include the system message in the payload.
        payload = {
            "messages": [
                {"role": "system", "content": self.system_message},  # System message here
                {"role": "user", "content": conversation_prompt},
            ],
            "stream": stream  # Now passed dynamically
        }

        def for_stream():
            try:
                with requests.post(self.model_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                    response.raise_for_status()
                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if not line:
                            continue
                        # Lines prefixed "0:" carry a JSON-encoded text chunk;
                        # "d:" marks the end of the stream.
                        if line.startswith("0:"):
                            try:
                                text = json.loads(line[2:])  # Extract streaming text
                            except json.JSONDecodeError:
                                print("\n[Error] Failed to decode JSON content.")
                                continue
                            streaming_text += text  # Accumulate for history
                            # BUGFIX: previously yielded the dict in both the raw
                            # and non-raw cases; raw now yields the bare text.
                            yield text if raw else dict(text=text)
                        elif line.startswith("d:"):
                            break  # End of stream
                    self.conversation.update_chat_history(prompt, streaming_text)
                    self.last_response.update({"text": streaming_text})
            except requests.exceptions.RequestException as e:
                # Previously this was only printed, leaving last_response empty
                # and get_message() crashing on the non-stream path.
                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}") from e

        def for_non_stream():
            # Drain the stream to populate last_response and history.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
        """Like :meth:`ask` but returns/yields plain text."""

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from an :meth:`ask` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
190
+
191
+
192
+
193
if __name__ == "__main__":
    # Quick manual smoke test: stream a completion to stdout.
    from rich import print

    ai = NinjaChat(model="gemini-1.5-pro", system_message="You are a creative writer.")
    for piece in ai.chat("write a poem about a robot learning to love", stream=True):
        print(piece, end="", flush=True)
@@ -166,7 +166,7 @@ class OLLAMA(Provider):
166
166
  assert isinstance(response, dict), "Response should be of dict data-type only"
167
167
  return response["text"]
168
168
  if __name__ == "__main__":
169
- ollama_provider = OLLAMA(model="qwen2:0.5b")
170
- response = ollama_provider.chat("hi", stream=True)
171
- for r in response:
172
- print(r, end="", flush=True)
169
+ ai = OLLAMA(model="qwen2:0.5b")
170
+ response = ai.chat("write a poem about AI", stream=True)
171
+ for chunk in response:
172
+ print(chunk, end="", flush=True)
@@ -146,7 +146,7 @@ class RUBIKSAI(Provider):
146
146
  if "choices" in data and len(data["choices"]) > 0:
147
147
  content = data["choices"][0]["delta"].get("content", "")
148
148
  streaming_response += content
149
- yield content if raw else dict(text=streaming_response)
149
+ yield content if raw else dict(text=content)
150
150
  except json.decoder.JSONDecodeError:
151
151
  continue
152
152
 
@@ -211,7 +211,7 @@ class RUBIKSAI(Provider):
211
211
  if __name__ == '__main__':
212
212
 
213
213
  from rich import print
214
- ai = RUBIKSAI()
215
- response = ai.chat("hi")
214
+ ai = RUBIKSAI(timeout=5000)
215
+ response = ai.chat("write a poem about AI", stream=True)
216
216
  for chunk in response:
217
217
  print(chunk, end="", flush=True)
@@ -11,7 +11,7 @@ class NexraImager(ImageProvider):
11
11
  """Image provider for Nexra API"""
12
12
 
13
13
  AVAILABLE_MODELS = {
14
- "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini"],
14
+ "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini", "flux", "midjourney"],
15
15
  "prodia": [
16
16
  "dreamshaperXL10_alpha2.safetensors [c8afe2ef]",
17
17
  "dynavisionXL_0411.safetensors [c39cc051]",
@@ -37,7 +37,7 @@ class NexraImager(ImageProvider):
37
37
  self.image_extension: str = "png"
38
38
 
39
39
  def generate(
40
- self, prompt: str, model: str = "emi", amount: int = 1,
40
+ self, prompt: str, model: str = "flux", amount: int = 1,
41
41
  max_retries: int = 3, retry_delay: int = 5,
42
42
  additional_params: Optional[dict] = None
43
43
  ) -> List[bytes]:
@@ -116,5 +116,5 @@ class NexraImager(ImageProvider):
116
116
 
117
117
  if __name__ == "__main__":
118
118
  bot = NexraImager()
119
- resp_standard = bot.generate("AI-generated image - webscout", "emi", 1)
119
+ resp_standard = bot.generate("AI-generated image - webscout", "midjourney", 1)
120
120
  print(bot.save(resp_standard))
@@ -6,4 +6,5 @@ from .Nexra import *
6
6
  from .huggingface import *
7
7
  from .artbit import *
8
8
  from .amigo import *
9
- from .WebSimAI import *
9
+ from .WebSimAI import *
10
+ from .imgninza import *
@@ -8,7 +8,7 @@ from requests.exceptions import RequestException
8
8
 
9
9
  from webscout.AIbase import ImageProvider
10
10
 
11
- class AiForceimagger(ImageProvider):
11
+ class AiForceimager(ImageProvider):
12
12
  """Image provider for Airforce API"""
13
13
 
14
14
  AVAILABLE_MODELS = [
@@ -152,7 +152,7 @@ class AiForceimagger(ImageProvider):
152
152
  return filenames
153
153
 
154
154
  if __name__ == "__main__":
155
- bot = AiForceimagger()
155
+ bot = AiForceimager()
156
156
  try:
157
157
  resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1, model="flux-realism")
158
158
  print(bot.save(resp))
@@ -0,0 +1,136 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import List, Dict, Optional
5
+
6
+ from webscout.AIbase import ImageProvider
7
+ from webscout import exceptions # Import exceptions module
8
+
9
+
10
class NinjaImager(ImageProvider):
    """
    Image provider for NinjaChat.ai.

    Generates image URLs via the NinjaChat image endpoints and can download
    the results to disk.
    """

    # Public model name -> generation endpoint.
    AVAILABLE_MODELS = {
        "stable-diffusion": "https://www.ninjachat.ai/api/image-generator",
        "flux-dev": "https://www.ninjachat.ai/api/flux-image-generator",
    }

    def __init__(self, timeout: int = 60, proxies: dict = {}):
        """Initializes the NinjaChatImager class."""
        self.headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": "https://www.ninjachat.ai",
            "Priority": "u=1, i",
            "Referer": "https://www.ninjachat.ai/image-generation",
            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.timeout = timeout
        self.prompt = "AI-generated image - webscout"
        self.image_extension = "png"  # Default extension

    def generate(self, prompt: str, amount: int = 1, model: str = "flux-dev") -> List[str]:
        """Generate images from a prompt.

        Returns a list of image URLs produced by the API.
        """
        assert bool(prompt), "Prompt cannot be null"
        assert isinstance(amount, int) and amount > 0, "Amount should be a positive integer"

        if model not in self.AVAILABLE_MODELS:
            raise exceptions.ModelNotFoundError(f"Model '{model}' not found. Available models: {', '.join(self.AVAILABLE_MODELS)}")

        self.prompt = prompt  # Remember the prompt for save()'s default name
        endpoint = self.AVAILABLE_MODELS[model]
        payload = {
            "prompt": prompt,
            "model": "flux-dev" if model == "flux-dev" else "stable-diffusion",
            "negativePrompt": "",
            "cfg": 7,
            "aspectRatio": "1:1",
            "outputFormat": self.image_extension,
            "numOutputs": amount,
            "outputQuality": 90,
        }

        try:
            response = requests.post(endpoint, headers=self.headers, json=payload, timeout=self.timeout)
            if response.status_code != 200:
                raise exceptions.FailedToGenerateResponseError(f"Request failed with status code: {response.status_code}, {response.text}")
            data = response.json()
            if 'output' not in data:
                raise exceptions.InvalidResponseError("Invalid API response format: 'output' key missing.")
            return list(data['output'])
        except requests.exceptions.RequestException as e:
            raise exceptions.APIConnectionError(f"An error occurred during the request: {e}")
        except json.JSONDecodeError as e:
            raise exceptions.InvalidResponseError(f"Failed to parse JSON response: {e}")

    def save(
        self,
        response: List[str],
        name: str = None,
        dir: str = os.getcwd(),
        filenames_prefix: str = "",
    ) -> List[str]:
        """Saves generated images.

        Downloads each URL in *response* into *dir*, avoiding filename
        collisions, and returns the (prefixed) filenames.
        """
        assert isinstance(response, list), f"Response should be a list, not {type(response)}"
        base_name = self.prompt if name is None else name

        saved = []
        counter = 0
        for img_url in response:
            # Probe name.png, name_1.png, ... until a free filename is found.
            while True:
                suffix = "" if counter == 0 else f"_{counter}"
                target = os.path.join(dir, base_name + suffix + "." + self.image_extension)
                if not os.path.isfile(target):
                    break
                counter += 1

            saved.append(filenames_prefix + os.path.split(target)[1])
            try:
                with requests.get(img_url, stream=True, timeout=self.timeout) as img_response:
                    img_response.raise_for_status()
                    with open(target, "wb") as fh:
                        for chunk in img_response.iter_content(chunk_size=8192):
                            fh.write(chunk)
            except requests.exceptions.RequestException as e:
                raise exceptions.FailedToSaveImageError(f"An error occurred while downloading/saving image: {e}")

        return saved
127
+
128
+
129
+
130
if __name__ == "__main__":
    # Quick manual smoke test: generate one image and save it locally.
    imager = NinjaImager()
    try:
        images = imager.generate("A shiny red sports car speeding down a scenic mountain road", 1)
        print(imager.save(images))
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -160,11 +160,10 @@ class YouChat(Provider):
160
160
  token = data.get('youChatToken', '')
161
161
  if token:
162
162
  streaming_text += token
163
- resp = dict(text=streaming_text)
164
- self.last_response.update(resp)
165
- yield value if raw else resp
163
+ yield token if raw else dict(text=token)
166
164
  except json.decoder.JSONDecodeError:
167
165
  pass
166
+ self.last_response.update(dict(text=streaming_text))
168
167
  self.conversation.update_chat_history(
169
168
  prompt, self.get_message(self.last_response)
170
169
  )
@@ -224,7 +223,7 @@ class YouChat(Provider):
224
223
  return response["text"]
225
224
  if __name__ == '__main__':
226
225
  from rich import print
227
- ai = YouChat()
228
- response = ai.chat("hi")
226
+ ai = YouChat(timeout=5000)
227
+ response = ai.chat("Who is Abhay Koul in AI?", stream=True)
229
228
  for chunk in response:
230
229
  print(chunk, end="", flush=True)
@@ -16,10 +16,9 @@ from .Phind import PhindSearch
16
16
  from .Phind import Phindv2
17
17
  from .ai4chat import *
18
18
  from .Gemini import GEMINI
19
- from .Poe import POE
20
19
  from .BasedGPT import BasedGPT
21
20
  from .Deepseek import DeepSeek
22
- from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
21
+ from .Deepinfra import DeepInfra
23
22
  from .Farfalle import *
24
23
  from .cleeai import *
25
24
  from .OLLAMA import OLLAMA
@@ -61,6 +60,12 @@ from .bixin import *
61
60
  from .ChatGPTES import *
62
61
  from .Amigo import *
63
62
  from .prefind import *
63
+ from .bagoodex import *
64
+ from .ChatHub import *
65
+ from .aimathgpt import *
66
+ from .gaurish import *
67
+ from .geminiprorealtime import *
68
+ from .NinjaChat import *
64
69
  __all__ = [
65
70
  'Farfalle',
66
71
  'LLAMA',
@@ -78,12 +83,9 @@ __all__ = [
78
83
  'PhindSearch',
79
84
  'Felo',
80
85
  'GEMINI',
81
- 'POE',
82
86
  'BasedGPT',
83
87
  'DeepSeek',
84
88
  'DeepInfra',
85
- 'VLM',
86
- 'AsyncDeepInfra',
87
89
  'AI4Chat',
88
90
  'Phindv2',
89
91
  'OLLAMA',
@@ -126,7 +128,12 @@ __all__ = [
126
128
  'ChatGPTES',
127
129
  'AmigoChat',
128
130
  'PrefindAI',
129
- # 'LearnFast',
131
+ 'Bagoodex',
132
+ 'ChatHub',
133
+ 'AIMathGPT',
134
+ 'GaurishCerebras',
135
+ 'GeminiPro',
136
+ 'NinjaChat',
130
137
 
131
138
 
132
139
  ]
@@ -195,5 +195,6 @@ class AI4Chat(Provider):
195
195
  if __name__ == "__main__":
196
196
  from rich import print
197
197
  ai = AI4Chat()
198
- response = ai.chat(input(">>> "))
199
- print(response)
198
+ response = ai.chat("write me poem about AI", stream=True)
199
+ for chunk in response:
200
+ print(chunk, end="", flush=True)