webscout 8.0-py3-none-any.whl → 8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of webscout has been marked as potentially problematic.

Files changed (45)
  1. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  2. webscout/Provider/AISEARCH/ISou.py +1 -1
  3. webscout/Provider/AISEARCH/__init__.py +2 -1
  4. webscout/Provider/AISEARCH/felo_search.py +1 -1
  5. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +1 -1
  7. webscout/Provider/AISEARCH/iask_search.py +436 -0
  8. webscout/Provider/AISEARCH/scira_search.py +1 -1
  9. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  10. webscout/Provider/ExaAI.py +1 -1
  11. webscout/Provider/Jadve.py +2 -2
  12. webscout/Provider/OPENAI/__init__.py +17 -0
  13. webscout/Provider/OPENAI/base.py +46 -0
  14. webscout/Provider/OPENAI/c4ai.py +347 -0
  15. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  16. webscout/Provider/OPENAI/deepinfra.py +284 -0
  17. webscout/Provider/OPENAI/exaai.py +419 -0
  18. webscout/Provider/OPENAI/exachat.py +421 -0
  19. webscout/Provider/OPENAI/freeaichat.py +355 -0
  20. webscout/Provider/OPENAI/glider.py +314 -0
  21. webscout/Provider/OPENAI/heckai.py +337 -0
  22. webscout/Provider/OPENAI/llmchatco.py +325 -0
  23. webscout/Provider/OPENAI/netwrck.py +348 -0
  24. webscout/Provider/OPENAI/scirachat.py +459 -0
  25. webscout/Provider/OPENAI/sonus.py +294 -0
  26. webscout/Provider/OPENAI/typegpt.py +361 -0
  27. webscout/Provider/OPENAI/utils.py +211 -0
  28. webscout/Provider/OPENAI/venice.py +428 -0
  29. webscout/Provider/OPENAI/wisecat.py +381 -0
  30. webscout/Provider/OPENAI/x0gpt.py +389 -0
  31. webscout/Provider/OPENAI/yep.py +329 -0
  32. webscout/Provider/Venice.py +1 -1
  33. webscout/Provider/__init__.py +6 -6
  34. webscout/Provider/scira_chat.py +13 -10
  35. webscout/Provider/typegpt.py +3 -184
  36. webscout/prompt_manager.py +2 -1
  37. webscout/version.py +1 -1
  38. webscout-8.1.dist-info/METADATA +683 -0
  39. {webscout-8.0.dist-info → webscout-8.1.dist-info}/RECORD +43 -23
  40. webscout/Provider/flowith.py +0 -207
  41. webscout-8.0.dist-info/METADATA +0 -995
  42. {webscout-8.0.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  43. {webscout-8.0.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  44. {webscout-8.0.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.0.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
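
The headline change in 8.1 is the new webscout/Provider/OPENAI package: seventeen provider modules that expose each backend behind an OpenAI-style client. A minimal usage sketch, based on the YEPCHAT docstring in the yep.py hunk below; the direct module import path is an assumption from the file list above, since the package's __init__.py re-exports are not shown in this diff:

from webscout.Provider.OPENAI.yep import YEPCHAT  # import path assumed from the file list

client = YEPCHAT()

# Non-streaming: mirrors openai.chat.completions.create
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

# Streaming: yields ChatCompletionChunk objects until the stream ends
for chunk in client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)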
webscout/Provider/OPENAI/yep.py ADDED
@@ -0,0 +1,329 @@
+ import time
+ import uuid
+ import cloudscraper # Import cloudscraper
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, get_system_prompt # Import get_system_prompt
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+
+ # --- YEPCHAT Client ---
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'YEPCHAT'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 1280,
+         stream: bool = False,
+         temperature: Optional[float] = 0.6,
+         top_p: Optional[float] = 0.7,
+         system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation using YEPCHAT API.
+         Mimics openai.chat.completions.create
+         Note: YEPCHAT does not support system messages. They will be ignored.
+         """
+         if model not in self._client.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
+             )
+
+         # Filter out system messages and warn the user if any are present
+         filtered_messages = []
+         has_system_message = False
+         if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
+             has_system_message = True
+
+         for msg in messages:
+             if msg["role"] == "system":
+                 continue # Skip system messages
+             filtered_messages.append(msg)
+
+         if has_system_message:
+             # Print warning in bold red
+             print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")
+
+         # If no messages left after filtering, raise an error
+         if not filtered_messages:
+             raise ValueError("At least one user or assistant message is required for YEPCHAT.")
+
+         payload = {
+             "stream": stream,
+             "max_tokens": max_tokens,
+             "top_p": top_p,
+             "temperature": temperature,
+             "messages": filtered_messages, # Use filtered messages
+             "model": model,
+         }
+
+         # Add any extra kwargs to the payload
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             # Use session.post from cloudscraper instance
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             if not response.ok:
+                 # Simplified error handling for now, add refresh logic if needed
+                 raise IOError(
+                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
+                 )
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     line = line.strip()
+                     if line.startswith("data: "):
+                         json_str = line[6:]
+                         if json_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_str)
+                             choice_data = data.get('choices', [{}])[0]
+                             delta_data = choice_data.get('delta', {})
+                             finish_reason = choice_data.get('finish_reason')
+                             content = delta_data.get('content')
+
+                             if content is not None: # Only yield chunks with content
+                                 delta = ChoiceDelta(content=content, role=delta_data.get('role', 'assistant'))
+                                 choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model,
+                                 )
+                                 yield chunk
+
+                         except json.JSONDecodeError:
+                             print(f"Warning: Could not decode JSON line: {json_str}")
+                             continue
+
+             # Yield final chunk with finish reason if not already sent
+             delta = ChoiceDelta()
+             choice = Choice(index=0, delta=delta, finish_reason="stop") # Assume stop if loop finishes
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+             yield chunk
+
+         except cloudscraper.exceptions.CloudflareChallengeError as e:
+             pass
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         full_response_content = ""
+         finish_reason = "stop" # Assume stop unless error occurs
+
+         try:
+             stream_generator = self._create_stream(request_id, created_time, model, payload)
+             for chunk in stream_generator:
+                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                     full_response_content += chunk.choices[0].delta.content
+                 if chunk.choices and chunk.choices[0].finish_reason:
+                     finish_reason = chunk.choices[0].finish_reason # Capture finish reason if provided
+
+         except IOError as e:
+             print(f"Error obtaining non-stream response from YEPCHAT: {e}")
+             finish_reason = "error"
+
+         # Construct the final ChatCompletion object
+         message = ChatCompletionMessage(
+             role="assistant",
+             content=full_response_content
+         )
+         choice = Choice(
+             index=0,
+             message=message,
+             finish_reason=finish_reason
+         )
+         # Usage data is not provided by this API in a standard way, set to 0
+         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+         completion = ChatCompletion(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model,
+             usage=usage,
+         )
+         return completion
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'YEPCHAT'):
+         self.completions = Completions(client)
+
+ class YEPCHAT(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for YEPCHAT API.
+
+     Usage:
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+     AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the YEPCHAT client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             browser: Browser name for LitAgent to generate User-Agent.
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://api.yep.com/v1/chat/completions"
+         self.session = cloudscraper.create_scraper() # Use cloudscraper
+
+         # Initialize LitAgent for user agent generation and fingerprinting
+         try:
+             agent = LitAgent()
+             fingerprint = agent.generate_fingerprint(browser=browser)
+         except Exception as e:
+             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
+             # Fallback fingerprint data
+             fingerprint = {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "platform": "Windows",
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+             }
+
+         # Initialize headers using the fingerprint
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json; charset=utf-8",
+             "DNT": "1",
+             "Origin": "https://yep.com",
+             "Referer": "https://yep.com/",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+         }
+         self.session.headers.update(self.headers)
+
+         # Generate cookies (consider if these need refreshing or specific values)
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensures the model name is valid for YEPCHAT.
+         Returns the validated model name or raises an error if invalid.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+         else:
+             # Raise error instead of defaulting, as model is mandatory in create()
+             raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")
+
+ # Example usage (optional, for testing)
+ if __name__ == '__main__':
+     print("Testing YEPCHAT OpenAI-Compatible Client...")
+
+     # Test Non-Streaming
+     try:
+         print("\n--- Non-Streaming Test (DeepSeek) ---")
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[
+                 {"role": "user", "content": "Say 'Hello World'"}
+             ],
+             stream=False
+         )
+         print("Response:", response.choices[0].message.content)
+         print("Usage:", response.usage) # Will show 0 tokens
+     except Exception as e:
+         print(f"Non-Streaming Test Failed: {e}")
+
+     # Test Streaming
+     try:
+         print("\n--- Streaming Test (Mixtral) ---")
+         client_stream = YEPCHAT()
+         stream = client_stream.chat.completions.create(
+             model="Mixtral-8x7B-Instruct-v0.1",
+             messages=[
+                 {"role": "user", "content": "Write a short sentence about AI."}
+             ],
+             stream=True
+         )
+         print("Streaming Response:")
+         full_stream_response = ""
+         for chunk in stream:
+             content = chunk.choices[0].delta.content
+             if content:
+                 print(content, end="", flush=True)
+                 full_stream_response += content
+         print("\n--- End of Stream ---")
+         print("Full streamed text:", full_stream_response)
+     except Exception as e:
+         print(f"Streaming Test Failed: {e}")
webscout/Provider/Venice.py CHANGED
@@ -16,7 +16,7 @@ class Venice(Provider):
      """
 
      AVAILABLE_MODELS = [
-         "llama-3.3-70b",
+         "mistral-31-24b",
          "llama-3.2-3b-akash",
          "qwen2dot5-coder-32b",
          "deepseek-coder-v2-lite",
webscout/Provider/__init__.py CHANGED
@@ -9,8 +9,8 @@ from .Openai import OPENAI
  from .Openai import AsyncOPENAI
  from .Koboldai import KOBOLDAI
  from .Koboldai import AsyncKOBOLDAI
- from .Blackboxai import BLACKBOXAI
- from .Phind import PhindSearch
+ from .Blackboxai import BLACKBOXAI
+ from .Phind import PhindSearch
  from .Phind import Phindv2
  from .ai4chat import *
  from .Gemini import GEMINI
@@ -77,7 +77,6 @@ from .HuggingFaceChat import *
  from .GithubChat import *
  from .copilot import *
  from .C4ai import *
- from .flowith import *
  from .sonus import *
  from .uncovr import *
  from .labyrinth import *
@@ -92,12 +91,13 @@ from .searchchat import *
  from .ExaAI import ExaAI
  from .OpenGPT import OpenGPT
  from .scira_chat import *
+ from .AISEARCH.iask_search import IAsk
  __all__ = [
      'LLAMA',
+     'IAsk',
      'SciraAI',
      'LabyrinthAI',
      'OpenGPT',
-     'Flowith',
      'C4ai',
      'Venice',
      'ExaAI',
@@ -124,8 +124,8 @@ __all__ = [
      'AsyncOPENAI',
      'KOBOLDAI',
      'AsyncKOBOLDAI',
-     'BLACKBOXAI',
-     'PhindSearch',
+     'BLACKBOXAI',
+     'PhindSearch',
      'GEMINI',
      'DeepInfra',
      'AI4Chat',
webscout/Provider/scira_chat.py CHANGED
@@ -1,3 +1,4 @@
+ from os import system
  import requests
  import json
  import uuid
@@ -38,7 +39,8 @@ class SciraAI(Provider):
          model: str = "scira-default",
          chat_id: str = None,
          user_id: str = None,
-         browser: str = "chrome"
+         browser: str = "chrome",
+         system_prompt: str = "You are a helpful assistant.",
      ):
          """Initializes the Scira AI API client.
 
@@ -56,6 +58,7 @@ class SciraAI(Provider):
              chat_id (str): Unique identifier for the chat session.
              user_id (str): Unique identifier for the user.
              browser (str): Browser to emulate in requests.
+             system_prompt (str): System prompt for the AI.
 
          """
          if model not in self.AVAILABLE_MODELS:
@@ -67,7 +70,8 @@ class SciraAI(Provider):
          self.agent = LitAgent()
          # Use fingerprinting to create a consistent browser identity
          self.fingerprint = self.agent.generate_fingerprint(browser)
-
+         self.system_prompt = system_prompt
+
          # Use the fingerprint for headers
          self.headers = {
              "Accept": self.fingerprint["accept"],
@@ -158,18 +162,17 @@ class SciraAI(Provider):
          else:
              raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
          # Prepare the request payload
          payload = {
              "id": self.chat_id,
-             "messages": [
-                 {
-                     "role": "user",
-                     "content": conversation_prompt,
-                     "parts": [{"type": "text", "text": conversation_prompt}]
-                 }
-             ],
+             "messages": messages,
              "model": self.model,
-             "group": "chat", # Always use chat mode (no web search)
+             "group": self.search_mode,
              "user_id": self.user_id,
              "timezone": "Asia/Calcutta"
          }
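
With these changes SciraAI sends a real system message and lets group follow the instance's search_mode instead of being pinned to "chat". A short sketch of the new constructor parameter; SciraAI is re-exported from webscout.Provider per the __init__.py hunk above, and the chat() call is assumed from the Provider interface rather than shown in this diff:

from webscout.Provider import SciraAI

bot = SciraAI(
    model="scira-default",
    system_prompt="Answer in one short paragraph.",  # new in 8.1
)
# print(bot.chat("What is webscout?"))  # assumed Provider method, not part of this diff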
webscout/Provider/typegpt.py CHANGED
@@ -14,195 +14,14 @@ class TypeGPT(Provider):
      """
      A class to interact with the TypeGPT.net API. Improved to match webscout standards.
      """
-     url = "https://chat.typegpt.net"
-
      AVAILABLE_MODELS = [
-         # OpenAI Models
-         "gpt-3.5-turbo",
-         "gpt-3.5-turbo-202201",
-         "gpt-4o",
-         "gpt-4o-2024-05-13",
-         "gpt-4o-2024-11-20",
-         "gpt-4o-mini",
+         # Working Models (based on testing)
          "gpt-4o-mini-2024-07-18",
-         # "gpt-4o-mini-ddg", >>>> NOT WORKING
-         "o1",
-         # "o1-mini-2024-09-12", >>>> NOT WORKING
-         "o1-preview",
-         "o3-mini",
          "chatgpt-4o-latest",
-
-         # Claude Models
-         # "claude", >>>> NOT WORKING
-         "claude-3-5-sonnet",
-         "claude-3-5-sonnet-20240620",
-         "claude-3-5-sonnet-x",
-         # "claude-3-haiku-ddg", >>>> NOT WORKING
-         "claude-hybridspace",
-         "claude-sonnet-3.5",
-         "Claude-sonnet-3.7",
-         "anthropic/claude-3.5-sonnet",
-         "anthropic/claude-3.7-sonnet",
-
-         # Meta/LLaMA Models
-         "@cf/meta/llama-2-7b-chat-fp16",
-         "@cf/meta/llama-2-7b-chat-int8",
-         "@cf/meta/llama-3-8b-instruct",
-         "@cf/meta/llama-3.1-8b-instruct",
-         "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
-         # "@cf/meta-llama/llama-2-7b-chat-hf-lora", >>>> NOT WORKING
-         "llama-3.1-405b",
-         "llama-3.1-70b",
-         # "llama-3.1-70b-ddg", >>>> NOT WORKING
-         "llama-3.1-8b",
-         # "llama-scaleway", >>>> NOT WORKING
-         "llama3.1-8b", # >>>> NOT WORKING
-         "llama3.3-70b",
-         # "llamalight", >>>> NOT WORKING
-         "Meta-Llama-3.1-405B-Instruct-Turbo",
-         "Meta-Llama-3.3-70B-Instruct-Turbo",
-         # "meta-llama/Llama-2-7b-chat-hf", >>>> NOT WORKING
-         # "meta-llama/Llama-3.1-70B-Instruct", >>>> NOT WORKING
-         # "meta-llama/Llama-3.1-8B-Instruct", >>>> NOT WORKING
-         "meta-llama/Llama-3.2-11B-Vision-Instruct",
-         # "meta-llama/Llama-3.2-1B-Instruct", >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-3B-Instruct", >>>> NOT WORKING
-         "meta-llama/Llama-3.2-90B-Vision-Instruct",
-         "meta-llama/Llama-3.3-70B-Instruct",
-         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-         # "meta-llama/Llama-Guard-3-8B", >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3-70B-Instruct", >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3-8B-Instruct", >>>> NOT WORKING
-         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-
-         # Mistral Models
-         "mistral",
-         "mistral-large",
-         "@cf/mistral/mistral-7b-instruct-v0.1",
-         # "@cf/mistral/mistral-7b-instruct-v0.2-lora", >>>> NOT WORKING
-         "@hf/mistralai/mistral-7b-instruct-v0.2",
-         "mistralai/Mistral-7B-Instruct-v0.2",
-         "mistralai/Mistral-7B-Instruct-v0.3",
-         "mistralai/Mixtral-8x22B-Instruct-v0.1",
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         # "mixtral-8x7b-ddg", >>>> NOT WORKING
-         "Mistral-7B-Instruct-v0.2",
-
-         # Qwen Models
-         "@cf/qwen/qwen1.5-0.5b-chat",
-         "@cf/qwen/qwen1.5-1.8b-chat",
-         "@cf/qwen/qwen1.5-14b-chat-awq",
-         "@cf/qwen/qwen1.5-7b-chat-awq",
-         "Qwen/Qwen2.5-3B-Instruct",
-         "Qwen/Qwen2.5-72B-Instruct",
-         "Qwen/Qwen2.5-Coder-32B-Instruct",
-         "Qwen/Qwen2-72B-Instruct",
-         "Qwen/QwQ-32B",
-         "Qwen/QwQ-32B-Preview",
-         "Qwen2.5-72B-Instruct",
-         "qwen",
-         "qwen-coder",
-         # "Qwen-QwQ-32B-Preview", >>>> NOT WORKING
-
-         # Google/Gemini Models
-         # "@cf/google/gemma-2b-it-lora", >>>> NOT WORKING
-         # "@cf/google/gemma-7b-it-lora", >>>> NOT WORKING
-         "@hf/google/gemma-7b-it",
-         "google/gemma-1.1-2b-it",
-         "google/gemma-1.1-7b-it",
-         "gemini-pro",
-         "gemini-1.5-pro",
-         "gemini-1.5-pro-latest",
-         "gemini-1.5-flash",
-         "gemini-flash-2.0",
-         "gemini-thinking",
-
-         # Microsoft Models
-         "@cf/microsoft/phi-2",
-         "microsoft/DialoGPT-medium",
-         "microsoft/Phi-3-medium-4k-instruct",
-         "microsoft/Phi-3-mini-4k-instruct",
-         "microsoft/Phi-3.5-mini-instruct",
-         "microsoft/phi-4",
-         "microsoft/WizardLM-2-8x22B",
-
-         # Yi Models
-         "01-ai/Yi-1.5-34B-Chat",
-         # "01-ai/Yi-34B-Chat", >>>> NOT WORKING
-
-         # DeepSeek Models
-         "@cf/deepseek-ai/deepseek-math-7b-base",
-         "@cf/deepseek-ai/deepseek-math-7b-instruct",
-         "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
-         "deepseek",
-         "deepseek-ai/DeepSeek-R1",
-         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-         # "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", >>>> NOT WORKING
-         # "deepseek-ai/DeepSeek-V2.5", >>>> NOT WORKING
-         "deepseek-llm-67b-chat",
          "deepseek-r1",
-         "deepseek-r1-distill-llama-70b",
-         # "deepseek-reasoner", >>>> NOT WORKING
          "deepseek-v3",
-         "uncensored-r1",
-
-         # Specialized Models and Tools
-         "@cf/defog/sqlcoder-7b-2",
-         "@cf/thebloke/discolm-german-7b-v1-awq",
-         "@cf/tiiuae/falcon-7b-instruct",
-         # "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", >>>> NOT WORKING
-         # "@hf/nexusflow/starling-lm-7b-beta", >>>> NOT WORKING
-         # "@hf/nousresearch/hermes-2-pro-mistral-7b", >>>> NOT WORKING
-         # "@hf/thebloke/deepseek-coder-6.7b-base-awq", >>>> NOT WORKING
-         # "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", >>>> NOT WORKING
-         # "@hf/thebloke/llama-2-13b-chat-awq", >>>> NOT WORKING
-         # "@hf/thebloke/llamaguard-7b-awq", >>>> NOT WORKING
-         # "@hf/thebloke/mistral-7b-instruct-v0.1-awq", >>>> NOT WORKING
-         # "@hf/thebloke/neural-chat-7b-v3-1-awq", >>>> NOT WORKING
-         # "@hf/thebloke/openhermes-2.5-mistral-7b-awq", >>>> NOT WORKING
-         # "@hf/thebloke/zephyr-7b-beta-awq", >>>> NOT WORKING
+         "uncensored-r1",
          "Image-Generator",
-         # "flux-1-schnell", >>>> NOT WORKING
-         # "HelpingAI-15B", >>>> NOT WORKING
-         # "HelpingAI2-3b", >>>> NOT WORKING
-         # "HelpingAI2-6B", >>>> NOT WORKING
-         # "HelpingAI2-9B", >>>> NOT WORKING
-         # "HelpingAI2.5-10B", >>>> NOT WORKING
-         # "Helpingai2.5-10b-1m", >>>> NOT WORKING
-         # "HelpingAI2.5-2B", >>>> NOT WORKING
-         # "HELVETE", >>>> NOT WORKING
-         # "HELVETE-X", >>>> NOT WORKING
-         # "evil", >>>> NOT WORKING
-         # "Image-Generator", >>>> NOT WORKING
-         # "Image-Generator-NSFW", >>>> NOT WORKING
-         # "midijourney", >>>> NOT WORKING
-         # "Niansuh", >>>> NOT WORKING
-         # "niansuh-t1", >>>> NOT WORKING
-         # "Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
-         # "NousResearch/Hermes-3-Llama-3.1-8B", >>>> NOT WORKING
-         # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
-         # "nvidia/Llama-3.1-Nemotron-70B-Instruct", >>>> NOT WORKING
-         # "openai", >>>> NOT WORKING
-         # "openai-audio", >>>> NOT WORKING
-         # "openai-large", >>>> NOT WORKING
-         # "openai-reasoning", >>>> NOT WORKING
-         # "openai/whisper-large-v3", >>>> NOT WORKING
-         # "openai/whisper-large-v3-turbo", >>>> NOT WORKING
-         # "openbmb/MiniCPM-Llama3-V-2_5", >>>> NOT WORKING
-         # "openchat/openchat-3.6-8b", >>>> NOT WORKING
-         # "p1", >>>> NOT WORKING
-         # "phi", >>>> NOT WORKING
-         # "Phi-4-multilmodal-instruct", >>>> NOT WORKING
-         # "Priya-3B", >>>> NOT WORKING
-         # "rtist", >>>> NOT WORKING
-         # "searchgpt", >>>> NOT WORKING
-         # "sur", >>>> NOT WORKING
-         # "sur-mistral", >>>> NOT WORKING
-         # "tiiuae/falcon-7b-instruct", >>>> NOT WORKING
-         # "TirexAi", >>>> NOT WORKING
-         # "unity", >>>> NOT WORKING
      ]
 
      def __init__(
@@ -230,7 +49,7 @@ class TypeGPT(Provider):
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://chat.typegpt.net/api/openai/typegpt/v1/chat/completions"
+         self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
          self.timeout = timeout
          self.last_response = {}
          self.model = model
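
The endpoint fix drops the stray typegpt path segment. A hedged sketch of a direct request against the corrected URL; headers and any cookies the site expects are omitted here, and the payload shape is assumed to be OpenAI-style, which the surviving model list suggests:

import requests

resp = requests.post(
    "https://chat.typegpt.net/api/openai/v1/chat/completions",  # corrected endpoint
    json={
        "model": "gpt-4o-mini-2024-07-18",  # one of the retained "working" models
        "messages": [{"role": "user", "content": "Hi"}],
        "stream": False,  # assumed supported, mirroring the OpenAI API
    },
    timeout=30,
)
print(resp.status_code, resp.text[:200])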
webscout/prompt_manager.py CHANGED
@@ -110,7 +110,8 @@ class AwesomePrompts:
          self,
          key: Union[str, int],
          default: Optional[str] = None,
-         case_insensitive: bool = True
+         case_insensitive: bool = True,
+         raise_not_found: bool = False # currently unused
      ) -> Optional[str]:
          """Get that perfect prompt! 🎯
 
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "8.0"
+ __version__ = "8.1"
  __prog__ = "webscout"