webscout 6.1-py3-none-any.whl → 6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (48)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Extra/autollama.py +37 -20
  4. webscout/Local/formats.py +4 -2
  5. webscout/Local/utils.py +37 -12
  6. webscout/Provider/Amigo.py +50 -37
  7. webscout/Provider/Deepseek.py +7 -6
  8. webscout/Provider/EDITEE.py +2 -2
  9. webscout/Provider/GPTWeb.py +1 -1
  10. webscout/Provider/Llama3.py +1 -1
  11. webscout/Provider/NinjaChat.py +2 -2
  12. webscout/Provider/OLLAMA.py +1 -1
  13. webscout/Provider/Perplexity.py +1 -1
  14. webscout/Provider/Reka.py +12 -5
  15. webscout/Provider/TTI/AIuncensored.py +103 -0
  16. webscout/Provider/TTI/__init__.py +3 -2
  17. webscout/Provider/TTI/talkai.py +116 -0
  18. webscout/Provider/TeachAnything.py +0 -3
  19. webscout/Provider/__init__.py +8 -11
  20. webscout/Provider/cerebras.py +143 -123
  21. webscout/Provider/cleeai.py +1 -1
  22. webscout/Provider/felo_search.py +1 -1
  23. webscout/Provider/gaurish.py +41 -2
  24. webscout/Provider/geminiprorealtime.py +1 -1
  25. webscout/Provider/genspark.py +1 -1
  26. webscout/Provider/julius.py +4 -3
  27. webscout/Provider/learnfastai.py +1 -1
  28. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  29. webscout/Provider/promptrefine.py +3 -1
  30. webscout/Provider/talkai.py +196 -0
  31. webscout/Provider/turboseek.py +3 -8
  32. webscout/Provider/tutorai.py +1 -1
  33. webscout/__init__.py +2 -43
  34. webscout/tempid.py +4 -73
  35. webscout/version.py +1 -1
  36. webscout/webai.py +1 -1
  37. {webscout-6.1.dist-info → webscout-6.2.dist-info}/METADATA +44 -128
  38. {webscout-6.1.dist-info → webscout-6.2.dist-info}/RECORD +42 -45
  39. webscout/Provider/BasedGPT.py +0 -214
  40. webscout/Provider/ChatHub.py +0 -209
  41. webscout/Provider/TTI/amigo.py +0 -148
  42. webscout/Provider/bixin.py +0 -264
  43. webscout/Provider/xdash.py +0 -182
  44. webscout/websx_search.py +0 -19
  45. {webscout-6.1.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  46. {webscout-6.1.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  47. {webscout-6.1.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  48. {webscout-6.1.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/Provider/Amigo.py CHANGED
@@ -1,4 +1,4 @@
-import requests
+import cloudscraper
 import json
 import uuid
 import os
@@ -12,7 +12,7 @@ from webscout import exceptions
 
 class AmigoChat(Provider):
     """
-    A class to interact with the AmigoChat.io API.
+    A class to interact with the AmigoChat.io API using cloudscraper.
     """
 
     AVAILABLE_MODELS = [
@@ -51,14 +51,18 @@ class AmigoChat(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): The AI model to use for text generation. Defaults to "o1-preview".
-                Options: "llama-three-point-one", "openai-o-one-mini", "claude",
-                "gemini-1.5-pro", "gemini-1.5-flash", "openai-o-one".
+            model (str, optional): The AI model to use for text generation. Defaults to "o1-preview".
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        self.session = cloudscraper.create_scraper(
+            browser={
+                'browser': 'chrome',
+                'platform': 'windows',
+                'mobile': False
+            }
+        )
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
@@ -156,7 +160,7 @@
             "frequency_penalty": 0,
             "max_tokens": 4000,
             "presence_penalty": 0,
-            "stream": stream, # Enable streaming
+            "stream": stream,
             "temperature": 0.5,
             "top_p": 0.95
         }
@@ -164,37 +168,46 @@
         def for_stream():
             try:
                 # Make the POST request with streaming enabled
-                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True) as response:
-                    # Check if the request was successful
-                    if response.status_code == 201:
-                        # Iterate over the streamed response line by line
-                        for line in response.iter_lines():
-                            if line:
-                                # Decode the line from bytes to string
-                                decoded_line = line.decode('utf-8').strip()
-                                if decoded_line.startswith("data: "):
-                                    data_str = decoded_line[6:]
-                                    if data_str == "[DONE]":
-                                        break
-                                    try:
-                                        # Load the JSON data
-                                        data_json = json.loads(data_str)
-
-                                        # Extract the content from the response
-                                        choices = data_json.get("choices", [])
-                                        if choices:
-                                            delta = choices[0].get("delta", {})
-                                            content = delta.get("content", "")
-                                            if content:
-                                                yield content if raw else dict(text=content)
-                                    except json.JSONDecodeError:
-                                        print(f"Received non-JSON data: {data_str}")
-                    else:
-                        print(f"Request failed with status code {response.status_code}")
-                        print("Response:", response.text)
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                )
+
+                # Check if the request was successful
+                if response.status_code == 201:
+                    # Iterate over the streamed response line by line
+                    for line in response.iter_lines():
+                        if line:
+                            # Decode the line from bytes to string
+                            decoded_line = line.decode('utf-8').strip()
+                            if decoded_line.startswith("data: "):
+                                data_str = decoded_line[6:]
+                                if data_str == "[DONE]":
+                                    break
+                                try:
+                                    # Load the JSON data
+                                    data_json = json.loads(data_str)
+
+                                    # Extract the content from the response
+                                    choices = data_json.get("choices", [])
+                                    if choices:
+                                        delta = choices[0].get("delta", {})
+                                        content = delta.get("content", "")
+                                        if content:
+                                            yield content if raw else dict(text=content)
+                                except json.JSONDecodeError:
+                                    print(f"Received non-JSON data: {data_str}")
+                else:
+                    print(f"Request failed with status code {response.status_code}")
+                    print("Response:", response.text)
 
-            except requests.exceptions.RequestException as e:
-                print("An error occurred while making the request:", e)
+            except (cloudscraper.exceptions.CloudflareChallengeError,
+                    cloudscraper.exceptions.CloudflareCode1020) as e:
+                print("Cloudflare protection error:", str(e))
+            except Exception as e:
+                print("An error occurred while making the request:", str(e))
 
         def for_non_stream():
             # Accumulate the streaming response
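Note on the requests → cloudscraper switch above: cloudscraper.create_scraper() returns a requests-compatible session that transparently handles Cloudflare's JavaScript challenge, which is why the surrounding AmigoChat code keeps calling .post() unchanged. A minimal sketch of the pattern, with an illustrative endpoint and payload that are not part of the package:

import cloudscraper

# Requests-compatible session; the browser hint mirrors the one used in Amigo.py above.
scraper = cloudscraper.create_scraper(
    browser={'browser': 'chrome', 'platform': 'windows', 'mobile': False}
)

# .post()/.get() behave like requests.Session methods (json=, stream=, timeout= all work).
resp = scraper.post("https://example.com/v1/chat", json={"prompt": "hello"}, stream=True, timeout=30)
for line in resp.iter_lines():
    if line:
        print(line.decode("utf-8"))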
webscout/Provider/Deepseek.py CHANGED
@@ -64,23 +64,24 @@ class DeepSeek(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.headers = {
-            "authority": "chat.deepseek.com",
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "authorization": f"Bearer {self.api_token}",
+            "content-length": "128",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://chat.deepseek.com",
-            "referer": "https://chat.deepseek.com",
-            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "priority": "u=1, i",
+            "referer": "https://chat.deepseek.com/",
+            "sec-ch-ua": '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-            "x-app-version": "20240126.0"
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
+            "x-app-version": "20241018.0"
         }
         self.__available_optimizers = (
             method
@@ -223,4 +224,4 @@ if __name__ == '__main__':
     ai = DeepSeek(api_key="", timeout=5000)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/EDITEE.py CHANGED
@@ -1,4 +1,4 @@
-import requests
+import cloudscraper
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation, Proxy
 from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -49,7 +49,7 @@ class Editee(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        self.session = cloudscraper.create_scraper()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://editee.com/submit/chatgptfree"
webscout/Provider/GPTWeb.py CHANGED
@@ -188,6 +188,6 @@ class GPTWeb(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = GPTWeb()
-    response = ai.chat(input(">>> "))
+    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
     for chunk in response:
         print(chunk, end='', flush=True)
webscout/Provider/Llama3.py CHANGED
@@ -183,7 +183,7 @@ class LLAMA3(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = LLAMA3(api_key='7979b01c-c5ea-40df-9198-f45733fa2208')
+    ai = LLAMA3(api_key='')
     response = ai.chat(input(">>> "))
     for chunks in response:
         print(chunks, end="", flush=True)
webscout/Provider/NinjaChat.py CHANGED
@@ -192,9 +192,9 @@ class NinjaChat(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    bot = NinjaChat(model="gemini-1.5-pro", system_message="You are a creative writer.")
+    bot = NinjaChat(model="perplexity", system_message="You are a creative writer.")
 
-    response = bot.chat("write a poem about a robot learning to love", stream=True)
+    response = bot.chat("tell me about Abhay koul, HelpingAI ", stream=True)
 
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/OLLAMA.py CHANGED
@@ -166,7 +166,7 @@ class OLLAMA(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 if __name__ == "__main__":
-    ai = OLLAMA(model="qwen2:0.5b")
+    ai = OLLAMA(model="llama3.2:1b")
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/Perplexity.py CHANGED
@@ -592,7 +592,7 @@ class Perplexity(Provider):
 if __name__ == "__main__":
     perplexity = Perplexity()
     # Stream the response
-    response = perplexity.chat("What is the meaning of life?")
+    response = perplexity.chat("tell me about Abhay koul, HelpingAI ")
     for chunk in response:
         print(chunk, end="", flush=True)
 
webscout/Provider/Reka.py CHANGED
@@ -4,10 +4,10 @@ import requests
 
 import json
 
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
 #-----------------------------------------------REKA-----------------------------------------------
@@ -205,4 +205,11 @@ class REKA(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response.get("text")
+        return response.get("text")
+if __name__ == "__main__":
+
+    from rich import print
+    ai = REKA(api_key="YOUR_API_KEY", timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/TTI/AIuncensored.py ADDED
@@ -0,0 +1,103 @@
+import requests
+import json
+import os
+from datetime import datetime
+from typing import List, Dict, Optional
+
+from webscout.AIbase import ImageProvider
+from webscout import exceptions
+
+
+class AIUncensoredImager(ImageProvider):
+    """
+    Image provider for AIUncensored.info.
+    """
+
+    def __init__(self, timeout: int = 60, proxies: dict = {}):
+        """Initializes the AIUncensoredImager class."""
+        self.url = "https://twitterclone-i0wr.onrender.com/api/image"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "Origin": "https://www.aiuncensored.info",
+            "Referer": "https://www.aiuncensored.info/",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt = "AI-generated image - webscout"
+        self.image_extension = "jpg"
+
+    def generate(self, prompt: str, amount: int = 1) -> List[str]:
+        """Generates image URLs from a prompt."""
+
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int) and amount > 0, "Amount must be a positive integer"
+        self.prompt = prompt
+
+        payload = {"prompt": prompt}
+        image_urls = []
+
+        try:
+            with self.session.post(self.url, json=payload, timeout=self.timeout) as response:
+                response.raise_for_status() # Raise HTTPError for bad responses
+                data = response.json()
+                image_url = data.get("image_url")
+
+                if not image_url:
+                    raise exceptions.InvalidResponseError("No image URL in API response")
+
+                image_urls.append(image_url) # Only one image returned for now
+
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Error during request: {e}")
+        except json.JSONDecodeError as e:
+            raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {e}")
+
+        return image_urls
+
+    def save(
+        self,
+        response: List[str],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Saves generated images."""
+
+        assert isinstance(response, list), f"Response should be a list, not {type(response)}"
+        name = self.prompt if name is None else name
+        os.makedirs(dir, exist_ok=True) #Create dir if needed
+
+        filenames = []
+        for i, img_url in enumerate(response):
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            filename = f"{dir}/{name}_{i + 1}_{timestamp}.{self.image_extension}"
+            filenames.append(filenames_prefix + os.path.basename(filename))
+
+            try:
+                with self.session.get(img_url, stream=True, timeout=self.timeout) as img_response:
+                    img_response.raise_for_status()
+                    with open(filename, "wb") as f:
+                        for chunk in img_response.iter_content(chunk_size=8192):
+                            f.write(chunk)
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToSaveImageError(f"Error downloading/saving image: {e}")
+
+        return filenames
+
+
+if __name__ == "__main__":
+    imager = AIUncensoredImager()
+    prompt = "a photo of a cat sitting in a basket"
+    try:
+        image_urls = imager.generate(prompt, amount=1)
+        saved_filenames = imager.save(image_urls)
+        print(f"Saved filenames: {saved_filenames}")
+    except Exception as e:
+        print(f"An error occurred: {e}")
webscout/Provider/TTI/__init__.py CHANGED
@@ -5,6 +5,7 @@ from .blackboximage import *
 from .Nexra import *
 from .huggingface import *
 from .artbit import *
-from .amigo import *
 from .WebSimAI import *
-from .imgninza import *
+from .imgninza import *
+from .AIuncensored import *
+from .talkai import *
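The two modules star-imported above expose the image providers added in this release. A brief usage sketch based on the generate()/save() signatures shown in this diff (untested against the live services; the prompt and paths are illustrative):

from webscout.Provider.TTI import TalkaiImager, AIUncensoredImager

imager = TalkaiImager(timeout=60)
urls = imager.generate("a watercolor painting of a lighthouse", amount=1)  # list of image URLs
files = imager.save(urls, name="lighthouse", dir=".")  # downloads the URLs, returns the filenames
print(files)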
webscout/Provider/TTI/talkai.py ADDED
@@ -0,0 +1,116 @@
+import uuid
+import requests
+import json
+import os
+from typing import Any, Dict, List, Optional
+
+from webscout.AIbase import ImageProvider
+from webscout import exceptions
+
+
+class TalkaiImager(ImageProvider):
+    """
+    Image provider for Talkai.info.
+    """
+
+    def __init__(self, timeout: int = 60, proxies: dict = {}):
+        """Initializes the TalkaiImager class.
+
+        Args:
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
+            proxies (dict, optional): HTTP request proxies. Defaults to {}.
+        """
+        self.api_endpoint = "https://talkai.info/chat/send/"
+        self.headers = {
+            'accept': 'application/json',
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'application/json',
+            'origin': 'https://talkai.info',
+            'referer': 'https://talkai.info/image/',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+
+    def generate(self, prompt: str, amount: int = 1) -> List[str]:
+        """Generates image URLs from a prompt."""
+        assert bool(prompt), "Prompt cannot be empty."
+        assert isinstance(amount, int) and amount > 0, "Amount must be a positive integer."
+
+        self.prompt = prompt
+        image_urls = []
+
+        try:
+            with self.session.post(self.api_endpoint, json=self._create_payload(prompt), timeout=self.timeout) as response:
+                response.raise_for_status()
+                data = response.json()
+
+                if 'data' in data and len(data['data']) > 0 and 'url' in data['data'][0]:
+                    image_urls.append(data['data'][0]['url'])
+                else:
+                    raise exceptions.InvalidResponseError("No image URL found in API response.")
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Error making API request: {e}") from e
+        except json.JSONDecodeError as e:
+            raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}") from e
+
+        return image_urls
+
+    def _create_payload(self, prompt: str) -> Dict[str, Any]:
+        return {
+            "type": "image",
+            "messagesHistory": [
+                {
+                    "id": str(uuid.uuid4()),
+                    "from": "you",
+                    "content": prompt
+                }
+            ],
+            "settings": {
+                "model": "gpt-4o-mini" # Or another suitable model if available
+            }
+        }
+
+
+    def save(
+        self,
+        response: List[str],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        assert isinstance(response, list), f"Response should be a list, not {type(response)}"
+        name = self.prompt if name is None else name
+
+        filenames = []
+        for i, url in enumerate(response):
+            try:
+                with self.session.get(url, stream=True, timeout=self.timeout) as r:
+                    r.raise_for_status()
+                    filename = f"{filenames_prefix}{name}_{i}.{self.image_extension}"
+                    filepath = os.path.join(dir, filename)
+                    with open(filepath, 'wb') as f:
+                        for chunk in r.iter_content(chunk_size=8192):
+                            f.write(chunk)
+                    filenames.append(filename)
+            except requests.exceptions.RequestException as e:
+                print(f"Error downloading image from {url}: {e}")
+                filenames.append(None) # Indicate failure to download
+
+        return filenames
+
+
+if __name__ == "__main__":
+    bot = TalkaiImager()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        print(f"An error occurred: {e}")
webscout/Provider/TeachAnything.py CHANGED
@@ -1,9 +1,6 @@
 import requests
 from requests.exceptions import RequestException
 from typing import Any, Dict
-import logging
-import random
-
 from webscout.AIutel import Conversation, Optimizers
 
 class TeachAnything:
webscout/Provider/__init__.py CHANGED
@@ -16,7 +16,6 @@ from .Phind import PhindSearch
 from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
-from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
 from .Deepinfra import DeepInfra
 from .Farfalle import *
@@ -31,7 +30,6 @@ from .RUBIKSAI import *
 from .meta import *
 from .DiscordRocks import *
 from .felo_search import *
-from .xdash import *
 from .julius import *
 from .Youchat import *
 from .yep import *
@@ -51,21 +49,22 @@ from .genspark import *
 from .upstage import *
 from .Bing import *
 from .GPTWeb import *
-from .aigames import *
+# from .UNFINISHED.aigames import *
 from .llamatutor import *
 from .promptrefine import *
 from .twitterclone import *
 from .tutorai import *
-from .bixin import *
 from .ChatGPTES import *
 from .Amigo import *
 from .prefind import *
 from .bagoodex import *
-from .ChatHub import *
+# from .UNFINISHED.ChatHub import *
 from .aimathgpt import *
 from .gaurish import *
 from .geminiprorealtime import *
 from .NinjaChat import *
+from .llmchat import *
+from .talkai import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -83,7 +82,6 @@ __all__ = [
     'PhindSearch',
     'Felo',
     'GEMINI',
-    'BasedGPT',
     'DeepSeek',
     'DeepInfra',
     'AI4Chat',
@@ -98,7 +96,6 @@ __all__ = [
     'Meta',
     'DiscordRocks',
     'PiAI',
-    'XDASH',
     'Julius',
     'YouChat',
     'YEPCHAT',
@@ -119,21 +116,21 @@ __all__ = [
     'Free2GPT',
     'Bing',
     'GPTWeb',
-    'AIGameIO',
+    # 'AIGameIO',
     'LlamaTutor',
     'PromptRefine',
     'AIUncensored',
     'TutorAI',
-    'Bixin',
     'ChatGPTES',
     'AmigoChat',
     'PrefindAI',
     'Bagoodex',
-    'ChatHub',
+    # 'ChatHub',
     'AIMathGPT',
     'GaurishCerebras',
     'GeminiPro',
     'NinjaChat',
-
+    'LLMChat',
+    'Talkai'
 
 ]
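With llmchat.py and talkai.py now wired into __all__, the two new chat providers become importable from webscout.Provider. A hedged sketch, assuming they follow the same Provider interface (a .chat() method with optional stream=True) used by the other providers in this diff; the constructor arguments may differ:

from webscout.Provider import LLMChat, Talkai

# Assumption: default constructor and .chat() streaming mirror the providers shown above.
ai = LLMChat()
for chunk in ai.chat("write a haiku about the sea", stream=True):
    print(chunk, end="", flush=True)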