webscout 7.9-py3-none-any.whl → 8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
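
The headline change in this release is the new webscout/Provider/OPENAI package: per-provider clients that mimic the openai-python chat.completions interface. Based on the usage docstring in the yep.py diff below, a client is driven like this (a minimal sketch; the import path is inferred from the file list and may also be re-exported from the package __init__):

from webscout.Provider.OPENAI.yep import YEPCHAT

client = YEPCHAT()
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)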
webscout/Provider/OPENAI/yep.py ADDED
@@ -0,0 +1,329 @@
+ import time
+ import uuid
+ import cloudscraper # Import cloudscraper
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, get_system_prompt # Import get_system_prompt
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+
+ # --- YEPCHAT Client ---
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'YEPCHAT'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 1280,
+         stream: bool = False,
+         temperature: Optional[float] = 0.6,
+         top_p: Optional[float] = 0.7,
+         system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation using YEPCHAT API.
+         Mimics openai.chat.completions.create
+         Note: YEPCHAT does not support system messages. They will be ignored.
+         """
+         if model not in self._client.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
+             )
+
+         # Filter out system messages and warn the user if any are present
+         filtered_messages = []
+         has_system_message = False
+         if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
+             has_system_message = True
+
+         for msg in messages:
+             if msg["role"] == "system":
+                 continue # Skip system messages
+             filtered_messages.append(msg)
+
+         if has_system_message:
+             # Print warning in bold red
+             print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")
+
+         # If no messages left after filtering, raise an error
+         if not filtered_messages:
+             raise ValueError("At least one user or assistant message is required for YEPCHAT.")
+
+         payload = {
+             "stream": stream,
+             "max_tokens": max_tokens,
+             "top_p": top_p,
+             "temperature": temperature,
+             "messages": filtered_messages, # Use filtered messages
+             "model": model,
+         }
+
+         # Add any extra kwargs to the payload
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             # Use session.post from cloudscraper instance
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             if not response.ok:
+                 # Simplified error handling for now, add refresh logic if needed
+                 raise IOError(
+                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
+                 )
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     line = line.strip()
+                     if line.startswith("data: "):
+                         json_str = line[6:]
+                         if json_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_str)
+                             choice_data = data.get('choices', [{}])[0]
+                             delta_data = choice_data.get('delta', {})
+                             finish_reason = choice_data.get('finish_reason')
+                             content = delta_data.get('content')
+
+                             if content is not None: # Only yield chunks with content
+                                 delta = ChoiceDelta(content=content, role=delta_data.get('role', 'assistant'))
+                                 choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model,
+                                 )
+                                 yield chunk
+
+                         except json.JSONDecodeError:
+                             print(f"Warning: Could not decode JSON line: {json_str}")
+                             continue
+
+             # Yield final chunk with finish reason if not already sent
+             delta = ChoiceDelta()
+             choice = Choice(index=0, delta=delta, finish_reason="stop") # Assume stop if loop finishes
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+             yield chunk
+
+         except cloudscraper.exceptions.CloudflareChallengeError as e:
+             pass
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         full_response_content = ""
+         finish_reason = "stop" # Assume stop unless error occurs
+
+         try:
+             stream_generator = self._create_stream(request_id, created_time, model, payload)
+             for chunk in stream_generator:
+                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                     full_response_content += chunk.choices[0].delta.content
+                 if chunk.choices and chunk.choices[0].finish_reason:
+                     finish_reason = chunk.choices[0].finish_reason # Capture finish reason if provided
+
+         except IOError as e:
+             print(f"Error obtaining non-stream response from YEPCHAT: {e}")
+             finish_reason = "error"
+
+         # Construct the final ChatCompletion object
+         message = ChatCompletionMessage(
+             role="assistant",
+             content=full_response_content
+         )
+         choice = Choice(
+             index=0,
+             message=message,
+             finish_reason=finish_reason
+         )
+         # Usage data is not provided by this API in a standard way, set to 0
+         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+         completion = ChatCompletion(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model,
+             usage=usage,
+         )
+         return completion
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'YEPCHAT'):
+         self.completions = Completions(client)
+
+ class YEPCHAT(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for YEPCHAT API.
+
+     Usage:
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+     AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the YEPCHAT client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             browser: Browser name for LitAgent to generate User-Agent.
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://api.yep.com/v1/chat/completions"
+         self.session = cloudscraper.create_scraper() # Use cloudscraper
+
+         # Initialize LitAgent for user agent generation and fingerprinting
+         try:
+             agent = LitAgent()
+             fingerprint = agent.generate_fingerprint(browser=browser)
+         except Exception as e:
+             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
+             # Fallback fingerprint data
+             fingerprint = {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "platform": "Windows",
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+             }
+
+         # Initialize headers using the fingerprint
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json; charset=utf-8",
+             "DNT": "1",
+             "Origin": "https://yep.com",
+             "Referer": "https://yep.com/",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+         }
+         self.session.headers.update(self.headers)
+
+         # Generate cookies (consider if these need refreshing or specific values)
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensures the model name is valid for YEPCHAT.
+         Returns the validated model name or raises an error if invalid.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+         else:
+             # Raise error instead of defaulting, as model is mandatory in create()
+             raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")
+
+ # Example usage (optional, for testing)
+ if __name__ == '__main__':
+     print("Testing YEPCHAT OpenAI-Compatible Client...")
+
+     # Test Non-Streaming
+     try:
+         print("\n--- Non-Streaming Test (DeepSeek) ---")
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[
+                 {"role": "user", "content": "Say 'Hello World'"}
+             ],
+             stream=False
+         )
+         print("Response:", response.choices[0].message.content)
+         print("Usage:", response.usage) # Will show 0 tokens
+     except Exception as e:
+         print(f"Non-Streaming Test Failed: {e}")
+
+     # Test Streaming
+     try:
+         print("\n--- Streaming Test (Mixtral) ---")
+         client_stream = YEPCHAT()
+         stream = client_stream.chat.completions.create(
+             model="Mixtral-8x7B-Instruct-v0.1",
+             messages=[
+                 {"role": "user", "content": "Write a short sentence about AI."}
+             ],
+             stream=True
+         )
+         print("Streaming Response:")
+         full_stream_response = ""
+         for chunk in stream:
+             content = chunk.choices[0].delta.content
+             if content:
+                 print(content, end="", flush=True)
+                 full_stream_response += content
+         print("\n--- End of Stream ---")
+         print("Full streamed text:", full_stream_response)
+     except Exception as e:
+         print(f"Streaming Test Failed: {e}")
webscout/Provider/OpenGPT.py ADDED
@@ -0,0 +1,199 @@
+ import requests
+ import json
+ from typing import Dict, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class OpenGPT(Provider):
+     """
+     A class to interact with the Open-GPT API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         app_id: str = "clf3yg8730000ih08ndbdi2v4",
+     ):
+         """Initializes the OpenGPT API client.
+
+         Args:
+             is_conversation (bool, optional): Whether to maintain conversation history. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             app_id (str, optional): The OpenGPT application ID. Defaults to "clf3yg8730000ih08ndbdi2v4".
+         """
+         self.session = requests.Session()
+         self.agent = LitAgent()
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.app_id = app_id
+
+         # Set up headers with dynamic user agent
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": self.agent.random(),
+             "Referer": f"https://open-gpt.app/id/app/{app_id}"
+         }
+
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         # Initialize optimizers
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         # Setup conversation
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             ) if act else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict, Generator]:
+         """
+         Send a prompt to the OpenGPT API and get a response.
+
+         Args:
+             prompt: The user input/prompt for the API.
+             stream: Whether to stream the response.
+             raw: Whether to return the raw API response.
+             optimizer: Optimizer to use on the prompt.
+             conversationally: Whether to apply the optimizer on the full conversation prompt.
+
+         Returns:
+             A dictionary or generator with the response.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Prepare the request body payload
+         payload = {
+             "userInput": conversation_prompt,
+             "id": self.app_id,
+             "userKey": "" # Assuming userKey is meant to be empty as in the original code
+         }
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(
+                     "https://open-gpt.app/api/generate",
+                     data=json.dumps(payload),
+                     timeout=self.timeout
+                 )
+
+                 # Raise an exception for bad status codes
+                 response.raise_for_status()
+
+                 response_text = response.text
+                 self.last_response = {"text": response_text}
+                 self.conversation.update_chat_history(prompt, response_text)
+
+                 return {"text": response_text} if not raw else {"raw": response_text}
+
+             except requests.exceptions.RequestException as e:
+                 # Handle potential errors during the request
+                 error_msg = f"Error fetching data: {e}"
+                 raise exceptions.FailedToGenerateResponseError(error_msg)
+             except Exception as e:
+                 # Catch any other unexpected errors
+                 error_msg = f"An unexpected error occurred: {e}"
+                 raise exceptions.FailedToGenerateResponseError(error_msg)
+
+         # This provider doesn't support streaming, so just return non-stream
+         return for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Send a prompt to the OpenGPT API and get a text response.
+
+         Args:
+             prompt: The user input/prompt for the API.
+             stream: Whether to stream the response (not supported).
+             optimizer: Optimizer to use on the prompt.
+             conversationally: Whether to apply the optimizer on the full conversation prompt.
+
+         Returns:
+             A string with the response text.
+         """
+         response = self.ask(
+             prompt, False, optimizer=optimizer, conversationally=conversationally
+         )
+         return self.get_message(response)
+
+     def get_message(self, response: dict) -> str:
+         """
+         Extract the message from the response dictionary.
+
+         Args:
+             response: Response dictionary from the ask method.
+
+         Returns:
+             The text response as a string.
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     # Test the provider
+     print("-" * 80)
+     print("Testing OpenGPT provider")
+     print("-" * 80)
+
+     try:
+         test_ai = OpenGPT()
+         response = test_ai.chat("Explain quantum physics simply.")
+         print(response)
+     except Exception as e:
+         print(f"Error: {e}")
+
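
The new OpenGPT provider follows webscout's classic Provider interface (ask/chat/get_message) rather than the OpenAI-compatible one. A hedged usage sketch mirroring the __main__ block above (requires network access to open-gpt.app; the import path is inferred from the file location):

from webscout.Provider.OpenGPT import OpenGPT

bot = OpenGPT(timeout=30)
# chat() returns plain text; ask() returns the underlying dict.
print(bot.chat("Explain quantum physics simply."))
print(bot.ask("Hi")["text"])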
webscout/Provider/PI.py CHANGED
@@ -50,7 +50,7 @@ class PiAI(Provider):
      ):
          """
          Initializes PiAI with voice support.
-
+
          Args:
              voice (bool): Enable/disable voice output
              voice_name (str): Name of the voice to use (if None, uses default)
@@ -66,7 +66,9 @@ class PiAI(Provider):
 
          # Initialize other attributes
          self.scraper = cloudscraper.create_scraper()
-         self.url = 'https://pi.ai/api/chat'
+         self.primary_url = 'https://pi.ai/api/chat'
+         self.fallback_url = 'https://pi.ai/api/v2/chat'
+         self.url = self.primary_url
          self.headers = {
              'Accept': 'text/event-stream',
              'Accept-Encoding': 'gzip, deflate, br, zstd',
@@ -115,7 +117,7 @@ class PiAI(Provider):
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies
-
+
          if self.is_conversation:
              self.start_conversation()
 
@@ -130,13 +132,13 @@ class PiAI(Provider):
              json={},
              timeout=self.timeout
          )
-
+
          if not response.ok:
              raise Exception(f"Failed to start conversation: {response.status_code}")
-
+
          data = response.json()
          self.conversation_id = data['conversations'][0]['sid']
-
+
          return self.conversation_id
 
      def ask(
@@ -152,7 +154,7 @@ class PiAI(Provider):
      ) -> dict:
          """
          Interact with Pi.ai by sending a prompt and receiving a response.
-
+
          Args:
              prompt (str): The prompt to send
              stream (bool): Whether to stream the response
@@ -186,15 +188,28 @@ class PiAI(Provider):
          }
 
          def process_stream():
+             # Try primary URL first
              response = self.scraper.post(
-                 self.url,
-                 headers=self.headers,
-                 cookies=self.cookies,
-                 json=data,
-                 stream=True,
+                 self.url,
+                 headers=self.headers,
+                 cookies=self.cookies,
+                 json=data,
+                 stream=True,
                  timeout=self.timeout
              )
-
+
+             # If primary URL fails, try fallback URL
+             if not response.ok and self.url == self.primary_url:
+                 self.url = self.fallback_url
+                 response = self.scraper.post(
+                     self.url,
+                     headers=self.headers,
+                     cookies=self.cookies,
+                     json=data,
+                     stream=True,
+                     timeout=self.timeout
+                 )
+
              if not response.ok:
                  raise Exception(f"API request failed: {response.status_code}")
 
@@ -204,7 +219,7 @@ class PiAI(Provider):
 
          if voice and voice_name and second_sid:
              threading.Thread(
-                 target=self.download_audio_threaded,
+                 target=self.download_audio_threaded,
                  args=(voice_name, second_sid, output_file)
              ).start()
 
@@ -245,7 +260,7 @@ class PiAI(Provider):
      ) -> str:
          """
          Generates a response based on the provided prompt.
-
+
          Args:
              prompt (str): The prompt to send
              stream (bool): Whether to stream the response
@@ -300,24 +315,24 @@ class PiAI(Provider):
              'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
              'messageSid': second_sid,
          }
-
+
          try:
              audio_response = self.scraper.get(
-                 'https://pi.ai/api/chat/voice',
-                 params=params,
-                 cookies=self.cookies,
-                 headers=self.headers,
+                 'https://pi.ai/api/chat/voice',
+                 params=params,
+                 cookies=self.cookies,
+                 headers=self.headers,
                  timeout=self.timeout
              )
-
+
              if not audio_response.ok:
                  return
-
+
              audio_response.raise_for_status()
-
+
              with open(output_file, "wb") as file:
                  file.write(audio_response.content)
-
+
          except requests.exceptions.RequestException:
              pass
 
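The PI.py change above adds a one-shot endpoint fallback: if the primary /api/chat endpoint returns a non-OK response, process_stream retries once against /api/v2/chat and keeps using whichever URL succeeded. The same pattern in isolation (the post callable and endpoint names here are illustrative, not webscout API):

def post_with_fallback(post, payload, primary, fallback):
    # Try the primary endpoint first, as process_stream() does.
    response = post(primary, json=payload, stream=True)
    if not response.ok:
        # Retry once against the fallback endpoint before giving up.
        response = post(fallback, json=payload, stream=True)
    if not response.ok:
        raise Exception(f"API request failed: {response.status_code}")
    return response
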
webscout/Provider/Venice.py CHANGED
@@ -16,7 +16,7 @@ class Venice(Provider):
      """
 
      AVAILABLE_MODELS = [
-         "llama-3.3-70b",
+         "mistral-31-24b",
          "llama-3.2-3b-akash",
          "qwen2dot5-coder-32b",
          "deepseek-coder-v2-lite",