webscout 7.5__py3-none-any.whl → 7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (132):
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/__init__.py +9 -9
  9. webscout/Extra/autocoder/autocoder_utiles.py +193 -199
  10. webscout/Extra/autocoder/rawdog.py +789 -677
  11. webscout/Extra/gguf.py +682 -428
  12. webscout/Extra/weather.py +178 -156
  13. webscout/Extra/weather_ascii.py +70 -17
  14. webscout/Litlogger/core/logger.py +1 -2
  15. webscout/Litlogger/handlers/file.py +1 -1
  16. webscout/Litlogger/styles/formats.py +0 -2
  17. webscout/Litlogger/utils/detectors.py +0 -1
  18. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  19. webscout/Provider/AISEARCH/ISou.py +1 -22
  20. webscout/Provider/AISEARCH/felo_search.py +0 -1
  21. webscout/Provider/AllenAI.py +28 -30
  22. webscout/Provider/C4ai.py +29 -11
  23. webscout/Provider/ChatGPTClone.py +226 -0
  24. webscout/Provider/ChatGPTGratis.py +24 -56
  25. webscout/Provider/DeepSeek.py +25 -17
  26. webscout/Provider/Deepinfra.py +115 -48
  27. webscout/Provider/Gemini.py +1 -1
  28. webscout/Provider/Glider.py +33 -12
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +23 -7
  31. webscout/Provider/Hunyuan.py +272 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/LambdaChat.py +391 -0
  34. webscout/Provider/Netwrck.py +42 -19
  35. webscout/Provider/OLLAMA.py +256 -32
  36. webscout/Provider/PI.py +4 -2
  37. webscout/Provider/Perplexitylabs.py +26 -6
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  51. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  52. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  53. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  54. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  55. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  56. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  57. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  58. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  59. webscout/Provider/TTS/__init__.py +2 -2
  60. webscout/Provider/TTS/deepgram.py +12 -39
  61. webscout/Provider/TTS/elevenlabs.py +14 -40
  62. webscout/Provider/TTS/gesserit.py +11 -35
  63. webscout/Provider/TTS/murfai.py +13 -39
  64. webscout/Provider/TTS/parler.py +17 -40
  65. webscout/Provider/TTS/speechma.py +180 -0
  66. webscout/Provider/TTS/streamElements.py +17 -44
  67. webscout/Provider/TextPollinationsAI.py +39 -59
  68. webscout/Provider/Venice.py +25 -8
  69. webscout/Provider/WebSim.py +227 -0
  70. webscout/Provider/WiseCat.py +27 -5
  71. webscout/Provider/Youchat.py +64 -37
  72. webscout/Provider/__init__.py +12 -7
  73. webscout/Provider/akashgpt.py +20 -5
  74. webscout/Provider/flowith.py +33 -7
  75. webscout/Provider/freeaichat.py +32 -45
  76. webscout/Provider/koala.py +20 -5
  77. webscout/Provider/labyrinth.py +239 -0
  78. webscout/Provider/learnfastai.py +28 -15
  79. webscout/Provider/llamatutor.py +1 -1
  80. webscout/Provider/llmchat.py +30 -8
  81. webscout/Provider/multichat.py +65 -9
  82. webscout/Provider/sonus.py +208 -0
  83. webscout/Provider/talkai.py +1 -0
  84. webscout/Provider/turboseek.py +3 -0
  85. webscout/Provider/tutorai.py +2 -0
  86. webscout/Provider/typegpt.py +155 -65
  87. webscout/Provider/uncovr.py +297 -0
  88. webscout/Provider/x0gpt.py +3 -1
  89. webscout/Provider/yep.py +102 -20
  90. webscout/__init__.py +3 -0
  91. webscout/cli.py +53 -40
  92. webscout/conversation.py +1 -10
  93. webscout/litagent/__init__.py +2 -2
  94. webscout/litagent/agent.py +356 -20
  95. webscout/litagent/constants.py +34 -5
  96. webscout/litprinter/__init__.py +0 -3
  97. webscout/models.py +181 -0
  98. webscout/optimizers.py +1 -1
  99. webscout/prompt_manager.py +2 -8
  100. webscout/scout/core/scout.py +1 -4
  101. webscout/scout/core/search_result.py +1 -1
  102. webscout/scout/core/text_utils.py +1 -1
  103. webscout/scout/core.py +2 -5
  104. webscout/scout/element.py +1 -1
  105. webscout/scout/parsers/html_parser.py +1 -1
  106. webscout/scout/utils.py +0 -1
  107. webscout/swiftcli/__init__.py +1 -3
  108. webscout/tempid.py +1 -1
  109. webscout/update_checker.py +1 -3
  110. webscout/version.py +1 -1
  111. webscout/webscout_search_async.py +1 -2
  112. webscout/yep_search.py +297 -297
  113. {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
  114. {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
  115. {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
  116. webscout/Extra/autollama.py +0 -231
  117. webscout/Provider/Amigo.py +0 -274
  118. webscout/Provider/Bing.py +0 -243
  119. webscout/Provider/DiscordRocks.py +0 -253
  120. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  121. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  122. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  123. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  124. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  125. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  126. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  127. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  128. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  129. webscout/Provider/TTS/voicepod.py +0 -117
  130. {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  131. {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  132. {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,208 @@
1
+ import requests
2
+ import json
3
+ from typing import Any, Dict, Optional, Generator, Union
4
+ from webscout.AIutel import Optimizers
5
+ from webscout.AIutel import Conversation
6
+ from webscout.AIutel import AwesomePrompts
7
+ from webscout.AIbase import Provider
8
+ from webscout import exceptions
9
+
10
class SonusAI(Provider):
    """Provider for the Sonus AI chat API (https://chat.sonus.ai).

    Prompts are posted as multipart form data to ``chat.php``; the response
    is a stream of SSE-style ``data: {json}`` lines whose ``content`` fields
    are concatenated into the reply text.
    """

    AVAILABLE_MODELS = [
        "pro",
        "air",
        "mini"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "pro"
    ):
        """Initializes the Sonus AI API client.

        Args:
            is_conversation: Whether to maintain a running chat history.
            max_tokens: Token budget forwarded to the Conversation helper.
            timeout: Per-request timeout in seconds.
            intro: Optional introductory/system prompt text.
            filepath: Optional path used to persist conversation history.
            update_file: Whether history updates are written back to filepath.
            proxies: requests-style proxy mapping applied to the session.
            history_offset: Maximum number of history characters retained.
            act: Optional AwesomePrompts persona key (overrides intro).
            model: One of AVAILABLE_MODELS.

        Raises:
            ValueError: If ``model`` is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://chat.sonus.ai/chat.php"

        # Headers for the request
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://chat.sonus.ai',
            'Referer': 'https://chat.sonus.ai/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model

        # FIX: materialized as a tuple. The original generator expression was
        # exhausted after the first membership test in ask(), so optimizer
        # validation silently failed on every subsequent call.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to Sonus AI.

        Args:
            prompt: The user message.
            stream: Yield chunks as they arrive instead of one final dict.
            raw: When streaming, yield bare text chunks instead of dicts.
            optimizer: Optional name of an Optimizers method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.
            reasoning: Request the API's reasoning mode.

        Returns:
            A ``{"text": ...}`` dict, or a generator of chunks when streaming.

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP or network failure.
            Exception: If ``optimizer`` is not a recognized optimizer name.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the multipart form data. Each value is a (filename, content)
        # tuple with filename=None so requests encodes a plain form field.
        # FIX: 'history' was `(None)` (i.e. just None), which requests cannot
        # encode as a multipart field — send an empty string field instead.
        files = {
            'message': (None, conversation_prompt),
            'history': (None, ''),
            'reasoning': (None, str(reasoning).lower()),
            'model': (None, self.model)
        }

        def _parse_line(line: bytes) -> Optional[str]:
            """Decode one response line and return its 'content', if any."""
            try:
                text = line.decode('utf-8')
                if text.startswith('data: '):
                    text = text[6:]
                data = json.loads(text)
            except (json.JSONDecodeError, UnicodeDecodeError):
                return None
            return data.get("content")

        def for_stream():
            try:
                # FIX: use self.session so configured proxies/headers apply.
                with self.session.post(self.url, files=files, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for line in response.iter_lines():
                        if not line:
                            continue
                        content = _parse_line(line)
                        if content is None:
                            continue
                        streaming_text += content
                        # FIX: honor the `raw` flag — the original yielded the
                        # dict in both branches.
                        yield content if raw else dict(text=content)

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            try:
                # FIX: use self.session so configured proxies/headers apply.
                response = self.session.post(self.url, files=files, timeout=self.timeout)
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )

                full_response = ""
                for line in response.iter_lines():
                    if not line:
                        continue
                    content = _parse_line(line)
                    if content is not None:
                        full_response += content

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Send ``prompt`` and return plain text (or a generator of text chunks)."""
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the reply text from an ask() response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
184
+
185
if __name__ == "__main__":
    # Smoke-test each Sonus model: stream a short prompt and report a
    # one-line ✓/✗ status with a truncated preview of the reply.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model_name in SonusAI.AVAILABLE_MODELS:
        try:
            client = SonusAI(model=model_name, timeout=60)
            collected = "".join(
                client.chat("Say 'Hello' in one word", stream=True)
            )

            if collected.strip():
                status = "✓"
                # Strip non-UTF-8 artifacts and truncate long replies.
                cleaned = collected.strip().encode('utf-8', errors='ignore').decode('utf-8')
                shown = cleaned[:50] + "..." if len(cleaned) > 50 else cleaned
            else:
                status = "✗"
                shown = "Empty or invalid response"
            print(f"\r{model_name:<50} {status:<10} {shown}")
        except Exception as exc:
            print(f"\r{model_name:<50} {'✗':<10} {str(exc)}")
@@ -9,6 +9,7 @@ from webscout.AIutel import AwesomePrompts
9
9
  from webscout.AIbase import Provider
10
10
  from webscout import exceptions
11
11
  from webscout.litagent import LitAgent
12
+
12
13
  class Talkai(Provider):
13
14
  """
14
15
  A class to interact with the Talkai.info API.
@@ -13,6 +13,8 @@ class TurboSeek(Provider):
13
13
  """
14
14
  This class provides methods for interacting with the TurboSeek API.
15
15
  """
16
+ AVAILABLE_MODELS = ["Llama 3.1 70B"]
17
+
16
18
  def __init__(
17
19
  self,
18
20
  is_conversation: bool = True,
@@ -24,6 +26,7 @@ class TurboSeek(Provider):
24
26
  proxies: dict = {},
25
27
  history_offset: int = 10250,
26
28
  act: str = None,
29
+ model: str = "Llama 3.1 70B"
27
30
  ):
28
31
  """Instantiates TurboSeek
29
32
 
@@ -15,6 +15,7 @@ class TutorAI(Provider):
15
15
  """
16
16
  A class to interact with the TutorAI.me API.
17
17
  """
18
+ AVAILABLE_MODELS = ["gpt-4o"]
18
19
 
19
20
  def __init__(
20
21
  self,
@@ -28,6 +29,7 @@ class TutorAI(Provider):
28
29
  history_offset: int = 10250,
29
30
  act: str = None,
30
31
  system_prompt: str = "You are a helpful AI assistant.",
32
+ model: str = "gpt-4o"
31
33
  ):
32
34
  """
33
35
  Initializes the TutorAI.me API with given parameters.
@@ -15,43 +15,64 @@ class TypeGPT(Provider):
15
15
  A class to interact with the TypeGPT.net API. Improved to match webscout standards.
16
16
  """
17
17
  url = "https://chat.typegpt.net"
18
- working = True
19
- supports_message_history = True
20
18
 
21
- models = [
19
+ AVAILABLE_MODELS = [
22
20
  # OpenAI Models
23
21
  "gpt-3.5-turbo",
24
- "chatgpt-4o-latest",
25
22
  "gpt-3.5-turbo-202201",
26
23
  "gpt-4o",
27
24
  "gpt-4o-2024-05-13",
25
+ "gpt-4o-2024-11-20",
26
+ "gpt-4o-mini",
27
+ "gpt-4o-mini-2024-07-18",
28
+ # "gpt-4o-mini-ddg", >>>> NOT WORKING
29
+ "o1",
30
+ # "o1-mini-2024-09-12", >>>> NOT WORKING
28
31
  "o1-preview",
32
+ "o3-mini",
33
+ "chatgpt-4o-latest",
29
34
 
30
35
  # Claude Models
31
- "claude",
36
+ # "claude", >>>> NOT WORKING
32
37
  "claude-3-5-sonnet",
33
- "claude-sonnet-3.5",
34
38
  "claude-3-5-sonnet-20240620",
39
+ "claude-3-5-sonnet-x",
40
+ # "claude-3-haiku-ddg", >>>> NOT WORKING
41
+ "claude-hybridspace",
42
+ "claude-sonnet-3.5",
43
+ "Claude-sonnet-3.7",
44
+ "anthropic/claude-3.5-sonnet",
45
+ "anthropic/claude-3.7-sonnet",
35
46
 
36
47
  # Meta/LLaMA Models
37
48
  "@cf/meta/llama-2-7b-chat-fp16",
38
49
  "@cf/meta/llama-2-7b-chat-int8",
39
50
  "@cf/meta/llama-3-8b-instruct",
40
51
  "@cf/meta/llama-3.1-8b-instruct",
41
- "@cf/meta-llama/llama-2-7b-chat-hf-lora",
52
+ "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
53
+ # "@cf/meta-llama/llama-2-7b-chat-hf-lora", >>>> NOT WORKING
42
54
  "llama-3.1-405b",
43
55
  "llama-3.1-70b",
56
+ # "llama-3.1-70b-ddg", >>>> NOT WORKING
44
57
  "llama-3.1-8b",
45
- "meta-llama/Llama-2-7b-chat-hf",
46
- "meta-llama/Llama-3.1-70B-Instruct",
47
- "meta-llama/Llama-3.1-8B-Instruct",
58
+ # "llama-scaleway", >>>> NOT WORKING
59
+ "llama3.1-8b", # >>>> NOT WORKING
60
+ "llama3.3-70b",
61
+ # "llamalight", >>>> NOT WORKING
62
+ "Meta-Llama-3.1-405B-Instruct-Turbo",
63
+ "Meta-Llama-3.3-70B-Instruct-Turbo",
64
+ # "meta-llama/Llama-2-7b-chat-hf", >>>> NOT WORKING
65
+ # "meta-llama/Llama-3.1-70B-Instruct", >>>> NOT WORKING
66
+ # "meta-llama/Llama-3.1-8B-Instruct", >>>> NOT WORKING
48
67
  "meta-llama/Llama-3.2-11B-Vision-Instruct",
49
- "meta-llama/Llama-3.2-1B-Instruct",
50
- "meta-llama/Llama-3.2-3B-Instruct",
68
+ # "meta-llama/Llama-3.2-1B-Instruct", >>>> NOT WORKING
69
+ # "meta-llama/Llama-3.2-3B-Instruct", >>>> NOT WORKING
51
70
  "meta-llama/Llama-3.2-90B-Vision-Instruct",
52
- "meta-llama/Llama-Guard-3-8B",
53
- "meta-llama/Meta-Llama-3-70B-Instruct",
54
- "meta-llama/Meta-Llama-3-8B-Instruct",
71
+ "meta-llama/Llama-3.3-70B-Instruct",
72
+ "meta-llama/Llama-3.3-70B-Instruct-Turbo",
73
+ # "meta-llama/Llama-Guard-3-8B", >>>> NOT WORKING
74
+ # "meta-llama/Meta-Llama-3-70B-Instruct", >>>> NOT WORKING
75
+ # "meta-llama/Meta-Llama-3-8B-Instruct", >>>> NOT WORKING
55
76
  "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
56
77
  "meta-llama/Meta-Llama-3.1-8B-Instruct",
57
78
  "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
@@ -60,25 +81,34 @@ class TypeGPT(Provider):
60
81
  "mistral",
61
82
  "mistral-large",
62
83
  "@cf/mistral/mistral-7b-instruct-v0.1",
63
- "@cf/mistral/mistral-7b-instruct-v0.2-lora",
84
+ # "@cf/mistral/mistral-7b-instruct-v0.2-lora", >>>> NOT WORKING
64
85
  "@hf/mistralai/mistral-7b-instruct-v0.2",
65
86
  "mistralai/Mistral-7B-Instruct-v0.2",
66
87
  "mistralai/Mistral-7B-Instruct-v0.3",
67
88
  "mistralai/Mixtral-8x22B-Instruct-v0.1",
68
89
  "mistralai/Mixtral-8x7B-Instruct-v0.1",
90
+ # "mixtral-8x7b-ddg", >>>> NOT WORKING
91
+ "Mistral-7B-Instruct-v0.2",
69
92
 
70
93
  # Qwen Models
71
94
  "@cf/qwen/qwen1.5-0.5b-chat",
72
95
  "@cf/qwen/qwen1.5-1.8b-chat",
73
- "@cf/qwen/qwen1.5-7b-chat-awq",
74
96
  "@cf/qwen/qwen1.5-14b-chat-awq",
97
+ "@cf/qwen/qwen1.5-7b-chat-awq",
75
98
  "Qwen/Qwen2.5-3B-Instruct",
76
99
  "Qwen/Qwen2.5-72B-Instruct",
77
100
  "Qwen/Qwen2.5-Coder-32B-Instruct",
101
+ "Qwen/Qwen2-72B-Instruct",
102
+ "Qwen/QwQ-32B",
103
+ "Qwen/QwQ-32B-Preview",
104
+ "Qwen2.5-72B-Instruct",
105
+ "qwen",
106
+ "qwen-coder",
107
+ # "Qwen-QwQ-32B-Preview", >>>> NOT WORKING
78
108
 
79
109
  # Google/Gemini Models
80
- "@cf/google/gemma-2b-it-lora",
81
- "@cf/google/gemma-7b-it-lora",
110
+ # "@cf/google/gemma-2b-it-lora", >>>> NOT WORKING
111
+ # "@cf/google/gemma-7b-it-lora", >>>> NOT WORKING
82
112
  "@hf/google/gemma-7b-it",
83
113
  "google/gemma-1.1-2b-it",
84
114
  "google/gemma-1.1-7b-it",
@@ -86,22 +116,8 @@ class TypeGPT(Provider):
86
116
  "gemini-1.5-pro",
87
117
  "gemini-1.5-pro-latest",
88
118
  "gemini-1.5-flash",
89
-
90
- # Cohere Models
91
- "c4ai-aya-23-35b",
92
- "c4ai-aya-23-8b",
93
- "command",
94
- "command-light",
95
- "command-light-nightly",
96
- "command-nightly",
97
- "command-r",
98
- "command-r-08-2024",
99
- "command-r-plus",
100
- "command-r-plus-08-2024",
101
- "rerank-english-v2.0",
102
- "rerank-english-v3.0",
103
- "rerank-multilingual-v2.0",
104
- "rerank-multilingual-v3.0",
119
+ "gemini-flash-2.0",
120
+ "gemini-thinking",
105
121
 
106
122
  # Microsoft Models
107
123
  "@cf/microsoft/phi-2",
@@ -109,29 +125,45 @@ class TypeGPT(Provider):
109
125
  "microsoft/Phi-3-medium-4k-instruct",
110
126
  "microsoft/Phi-3-mini-4k-instruct",
111
127
  "microsoft/Phi-3.5-mini-instruct",
128
+ "microsoft/phi-4",
112
129
  "microsoft/WizardLM-2-8x22B",
113
130
 
114
131
  # Yi Models
115
132
  "01-ai/Yi-1.5-34B-Chat",
116
- "01-ai/Yi-34B-Chat",
133
+ # "01-ai/Yi-34B-Chat", >>>> NOT WORKING
117
134
 
118
- # Specialized Models and Tools
135
+ # DeepSeek Models
119
136
  "@cf/deepseek-ai/deepseek-math-7b-base",
120
137
  "@cf/deepseek-ai/deepseek-math-7b-instruct",
138
+ "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
139
+ "deepseek",
140
+ "deepseek-ai/DeepSeek-R1",
141
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
142
+ # "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", >>>> NOT WORKING
143
+ # "deepseek-ai/DeepSeek-V2.5", >>>> NOT WORKING
144
+ "deepseek-llm-67b-chat",
145
+ "deepseek-r1",
146
+ "deepseek-r1-distill-llama-70b",
147
+ # "deepseek-reasoner", >>>> NOT WORKING
148
+ "deepseek-v3",
149
+
150
+ # Specialized Models and Tools
121
151
  "@cf/defog/sqlcoder-7b-2",
122
- "@cf/openchat/openchat-3.5-0106",
123
152
  "@cf/thebloke/discolm-german-7b-v1-awq",
124
153
  "@cf/tiiuae/falcon-7b-instruct",
125
- "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
126
- "@hf/nexusflow/starling-lm-7b-beta",
127
- "@hf/nousresearch/hermes-2-pro-mistral-7b",
128
- "@hf/thebloke/deepseek-coder-6.7b-base-awq",
129
- "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
130
- "@hf/thebloke/llama-2-13b-chat-awq",
131
- "@hf/thebloke/llamaguard-7b-awq",
132
- "@hf/thebloke/neural-chat-7b-v3-1-awq",
133
- "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
134
- "@hf/thebloke/zephyr-7b-beta-awq",
154
+ # "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", >>>> NOT WORKING
155
+ # "@hf/nexusflow/starling-lm-7b-beta", >>>> NOT WORKING
156
+ # "@hf/nousresearch/hermes-2-pro-mistral-7b", >>>> NOT WORKING
157
+ # "@hf/thebloke/deepseek-coder-6.7b-base-awq", >>>> NOT WORKING
158
+ # "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", >>>> NOT WORKING
159
+ # "@hf/thebloke/llama-2-13b-chat-awq", >>>> NOT WORKING
160
+ # "@hf/thebloke/llamaguard-7b-awq", >>>> NOT WORKING
161
+ # "@hf/thebloke/mistral-7b-instruct-v0.1-awq", >>>> NOT WORKING
162
+ # "@hf/thebloke/neural-chat-7b-v3-1-awq", >>>> NOT WORKING
163
+ # "@hf/thebloke/openhermes-2.5-mistral-7b-awq", >>>> NOT WORKING
164
+ # "@hf/thebloke/zephyr-7b-beta-awq", >>>> NOT WORKING
165
+
166
+ # Development Agents
135
167
  "AndroidDeveloper",
136
168
  "AngularJSAgent",
137
169
  "AzureAgent",
@@ -150,8 +182,6 @@ class TypeGPT(Provider):
150
182
  "GodotAgent",
151
183
  "GoogleCloudAgent",
152
184
  "HTMLAgent",
153
- "HerokuAgent",
154
- "ImageGeneration",
155
185
  "JavaAgent",
156
186
  "JavaScriptAgent",
157
187
  "MongoDBAgent",
@@ -162,18 +192,54 @@ class TypeGPT(Provider):
162
192
  "RepoMap",
163
193
  "SwiftDeveloper",
164
194
  "XcodeAgent",
165
- "YoutubeAgent",
195
+ # "YoutubeAgent", >>>> NOT WORKING
196
+
197
+ # Other Models
166
198
  "blackboxai",
167
199
  "blackboxai-pro",
168
200
  "builderAgent",
169
- "dify",
201
+ # "Cipher-20b", >>>> NOT WORKING
202
+ # "dify", >>>> NOT WORKING
170
203
  "flux",
171
- "openchat/openchat-3.6-8b",
172
- "rtist",
173
- "searchgpt",
174
- "sur",
175
- "sur-mistral",
176
- "unity"
204
+ # "flux-1-schnell", >>>> NOT WORKING
205
+ # "HelpingAI-15B", >>>> NOT WORKING
206
+ # "HelpingAI2-3b", >>>> NOT WORKING
207
+ # "HelpingAI2-6B", >>>> NOT WORKING
208
+ # "HelpingAI2-9B", >>>> NOT WORKING
209
+ # "HelpingAI2.5-10B", >>>> NOT WORKING
210
+ # "Helpingai2.5-10b-1m", >>>> NOT WORKING
211
+ # "HelpingAI2.5-2B", >>>> NOT WORKING
212
+ # "HELVETE", >>>> NOT WORKING
213
+ # "HELVETE-X", >>>> NOT WORKING
214
+ # "evil", >>>> NOT WORKING
215
+ # "Image-Generator", >>>> NOT WORKING
216
+ # "Image-Generator-NSFW", >>>> NOT WORKING
217
+ # "midijourney", >>>> NOT WORKING
218
+ # "Niansuh", >>>> NOT WORKING
219
+ # "niansuh-t1", >>>> NOT WORKING
220
+ # "Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
221
+ # "NousResearch/Hermes-3-Llama-3.1-8B", >>>> NOT WORKING
222
+ # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
223
+ # "nvidia/Llama-3.1-Nemotron-70B-Instruct", >>>> NOT WORKING
224
+ # "openai", >>>> NOT WORKING
225
+ # "openai-audio", >>>> NOT WORKING
226
+ # "openai-large", >>>> NOT WORKING
227
+ # "openai-reasoning", >>>> NOT WORKING
228
+ # "openai/whisper-large-v3", >>>> NOT WORKING
229
+ # "openai/whisper-large-v3-turbo", >>>> NOT WORKING
230
+ # "openbmb/MiniCPM-Llama3-V-2_5", >>>> NOT WORKING
231
+ # "openchat/openchat-3.6-8b", >>>> NOT WORKING
232
+ # "p1", >>>> NOT WORKING
233
+ # "phi", >>>> NOT WORKING
234
+ # "Phi-4-multilmodal-instruct", >>>> NOT WORKING
235
+ # "Priya-3B", >>>> NOT WORKING
236
+ # "rtist", >>>> NOT WORKING
237
+ # "searchgpt", >>>> NOT WORKING
238
+ # "sur", >>>> NOT WORKING
239
+ # "sur-mistral", >>>> NOT WORKING
240
+ # "tiiuae/falcon-7b-instruct", >>>> NOT WORKING
241
+ # "TirexAi", >>>> NOT WORKING
242
+ # "unity", >>>> NOT WORKING
177
243
  ]
178
244
 
179
245
  def __init__(
@@ -195,13 +261,13 @@ class TypeGPT(Provider):
195
261
  top_p: float = 1,
196
262
  ):
197
263
  """Initializes the TypeGPT API client."""
198
- if model not in self.models:
199
- raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")
264
+ if model not in self.AVAILABLE_MODELS:
265
+ raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
200
266
 
201
267
  self.session = requests.Session()
202
268
  self.is_conversation = is_conversation
203
269
  self.max_tokens_to_sample = max_tokens
204
- self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
270
+ self.api_endpoint = "https://chat.typegpt.net/api/openai/typegpt/v1/chat/completions"
205
271
  self.timeout = timeout
206
272
  self.last_response = {}
207
273
  self.model = model
@@ -354,7 +420,31 @@ class TypeGPT(Provider):
354
420
  raise TypeError("Invalid response type. Expected str or dict.")
355
421
 
356
422
  if __name__ == "__main__":
357
- ai = TypeGPT(model="chatgpt-4o-latest")
358
- response = ai.chat("hi", stream=True)
359
- for chunks in response:
360
- print(chunks, end="", flush=True)
423
+ print("-" * 80)
424
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
425
+ print("-" * 80)
426
+
427
+ # Test all available models
428
+ working = 0
429
+ total = len(TypeGPT.AVAILABLE_MODELS)
430
+
431
+ for model in TypeGPT.AVAILABLE_MODELS:
432
+ try:
433
+ test_ai = TypeGPT(model=model, timeout=60)
434
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
435
+ response_text = ""
436
+ for chunk in response:
437
+ response_text += chunk
438
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
439
+
440
+ if response_text and len(response_text.strip()) > 0:
441
+ status = "✓"
442
+ # Truncate response if too long
443
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
444
+ else:
445
+ status = "✗"
446
+ display_text = "Empty or invalid response"
447
+ print(f"\r{model:<50} {status:<10} {display_text}")
448
+ except Exception as e:
449
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
450
+