webscout 7.3__py3-none-any.whl → 7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. See the advisory details below.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/PI.py CHANGED
@@ -1,4 +1,4 @@
1
-
1
+ from uuid import uuid4
2
2
  import cloudscraper
3
3
  import json
4
4
  import re
@@ -10,7 +10,6 @@ from webscout.AIutel import AwesomePrompts
10
10
  from webscout.AIbase import Provider
11
11
  from typing import Dict, Union, Any, Optional
12
12
  from webscout import LitAgent
13
- from webscout.Litlogger import Logger, LogFormat
14
13
 
15
14
  class PiAI(Provider):
16
15
  """
@@ -21,10 +20,21 @@ class PiAI(Provider):
21
20
  AVAILABLE_VOICES (Dict[str, int]): Available voice options for audio responses
22
21
  """
23
22
 
23
+ AVAILABLE_VOICES: Dict[str, int] = {
24
+ "voice1": 1,
25
+ "voice2": 2,
26
+ "voice3": 3,
27
+ "voice4": 4,
28
+ "voice5": 5,
29
+ "voice6": 6,
30
+ "voice7": 7,
31
+ "voice8": 8
32
+ }
33
+
24
34
  def __init__(
25
35
  self,
26
36
  is_conversation: bool = True,
27
- max_tokens: int = 600,
37
+ max_tokens: int = 2048,
28
38
  timeout: int = 30,
29
39
  intro: str = None,
30
40
  filepath: str = None,
@@ -32,21 +42,29 @@ class PiAI(Provider):
32
42
  proxies: dict = {},
33
43
  history_offset: int = 10250,
34
44
  act: str = None,
35
- logging: bool = False,
45
+ voice: bool = False,
46
+ voice_name: str = "voice3",
47
+ output_file: str = "PiAI.mp3"
36
48
  ):
37
49
  """
38
- Initializes the PiAI provider with specified parameters.
50
+ Initializes PiAI with voice support.
51
+
52
+ Args:
53
+ voice (bool): Enable/disable voice output
54
+ voice_name (str): Name of the voice to use (if None, uses default)
55
+ output_file (str): Path to save voice output (default: PiAI.mp3)
39
56
  """
57
+ # Voice settings
58
+ self.voice_enabled = voice
59
+ self.voice_name = voice_name
60
+ self.output_file = output_file
61
+
62
+ if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
63
+ raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
64
+
65
+ # Initialize other attributes
40
66
  self.scraper = cloudscraper.create_scraper()
41
67
  self.url = 'https://pi.ai/api/chat'
42
- self.AVAILABLE_VOICES: Dict[str, str] = {
43
- "William": 1,
44
- "Samantha": 2,
45
- "Peter": 3,
46
- "Amy": 4,
47
- "Alice": 5,
48
- "Harry": 6
49
- }
50
68
  self.headers = {
51
69
  'Accept': 'text/event-stream',
52
70
  'Accept-Encoding': 'gzip, deflate, br, zstd',
@@ -65,14 +83,15 @@ class PiAI(Provider):
65
83
  'X-Api-Version': '3'
66
84
  }
67
85
  self.cookies = {
68
- '__Host-session': 'Ca5SoyAMJEaaB79jj1T69',
69
- '__cf_bm': 'g07oaL0jcstNfKDyZv7_YFjN0jnuBZjbMiXOWhy7V7A-1723536536-1.0.1.1-xwukd03L7oIAUqPG.OHbFNatDdHGZ28mRGsbsqfjBlpuy.b8w6UZIk8F3knMhhtNzwo4JQhBVdtYOlG0MvAw8A'
86
+ '__cf_bm': uuid4().hex
70
87
  }
71
88
 
72
89
  self.session = requests.Session()
90
+ self.session.headers.update(self.headers)
91
+ self.session.proxies = proxies
92
+
73
93
  self.is_conversation = is_conversation
74
94
  self.max_tokens_to_sample = max_tokens
75
- self.stream_chunk_size = 64
76
95
  self.timeout = timeout
77
96
  self.last_response = {} if self.is_conversation else {'text': ""}
78
97
  self.conversation_id = None
@@ -82,37 +101,26 @@ class PiAI(Provider):
82
101
  for method in dir(Optimizers)
83
102
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
84
103
  )
85
- self.session.headers.update(self.headers)
104
+
105
+ # Setup conversation
86
106
  Conversation.intro = (
87
107
  AwesomePrompts().get_act(
88
108
  act, raise_not_found=True, default=None, case_insensitive=True
89
- )
90
- if act
91
- else intro or Conversation.intro
109
+ ) if act else intro or Conversation.intro
92
110
  )
93
111
  self.conversation = Conversation(
94
112
  is_conversation, self.max_tokens_to_sample, filepath, update_file
95
113
  )
96
114
  self.conversation.history_offset = history_offset
97
115
  self.session.proxies = proxies
98
-
99
- self.logger = Logger(name="PiAI", format=LogFormat.MODERN_EMOJI) if logging else None
100
-
101
- self.knowledge_cutoff = "December 2023"
102
116
 
103
117
  if self.is_conversation:
104
118
  self.start_conversation()
105
119
 
106
- if self.logger:
107
- self.logger.info("PiAI instance initialized successfully")
108
-
109
120
  def start_conversation(self) -> str:
110
121
  """
111
122
  Initializes a new conversation and returns the conversation ID.
112
123
  """
113
- if self.logger:
114
- self.logger.debug("Starting new conversation")
115
-
116
124
  response = self.scraper.post(
117
125
  "https://pi.ai/api/chat/start",
118
126
  headers=self.headers,
@@ -122,33 +130,44 @@ class PiAI(Provider):
122
130
  )
123
131
 
124
132
  if not response.ok:
125
- if self.logger:
126
- self.logger.error(f"Failed to start conversation. Status code: {response.status_code}")
127
133
  raise Exception(f"Failed to start conversation: {response.status_code}")
128
134
 
129
135
  data = response.json()
130
136
  self.conversation_id = data['conversations'][0]['sid']
131
137
 
132
- if self.logger:
133
- self.logger.info(f"Conversation started successfully with ID: {self.conversation_id}")
134
-
135
138
  return self.conversation_id
136
139
 
137
140
  def ask(
138
141
  self,
139
142
  prompt: str,
140
- voice_name: Optional[str] = None,
141
143
  stream: bool = False,
142
144
  raw: bool = False,
143
145
  optimizer: str = None,
144
146
  conversationally: bool = False,
147
+ voice: bool = None,
148
+ voice_name: str = None,
145
149
  output_file: str = None
146
150
  ) -> dict:
147
151
  """
148
152
  Interact with Pi.ai by sending a prompt and receiving a response.
153
+
154
+ Args:
155
+ prompt (str): The prompt to send
156
+ stream (bool): Whether to stream the response
157
+ raw (bool): Return raw response format
158
+ optimizer (str): Prompt optimizer to use
159
+ conversationally (bool): Use conversation context
160
+ voice (bool): Override default voice setting
161
+ voice_name (str): Override default voice name
162
+ output_file (str): Override default output file path
149
163
  """
150
- if self.logger:
151
- self.logger.debug(f"Processing request - Prompt: {prompt[:50]}... Voice: {voice_name}")
164
+ # Voice configuration
165
+ voice = self.voice_enabled if voice is None else voice
166
+ voice_name = self.voice_name if voice_name is None else voice_name
167
+ output_file = self.output_file if output_file is None else output_file
168
+
169
+ if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
170
+ raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
152
171
 
153
172
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
154
173
  if optimizer:
@@ -157,18 +176,14 @@ class PiAI(Provider):
157
176
  conversation_prompt if conversationally else prompt
158
177
  )
159
178
  else:
160
- if self.logger:
161
- self.logger.error(f"Invalid optimizer requested: {optimizer}")
162
- raise Exception(
163
- f"Optimizer is not one of {self.__available_optimizers}"
164
- )
179
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
165
180
 
166
181
  data = {
167
182
  'text': conversation_prompt,
168
183
  'conversation': self.conversation_id
169
184
  }
170
185
 
171
- def for_stream():
186
+ def process_stream():
172
187
  response = self.scraper.post(
173
188
  self.url,
174
189
  headers=self.headers,
@@ -179,15 +194,13 @@ class PiAI(Provider):
179
194
  )
180
195
 
181
196
  if not response.ok:
182
- if self.logger:
183
- self.logger.error(f"API request failed. Status code: {response.status_code}")
184
197
  raise Exception(f"API request failed: {response.status_code}")
185
198
 
186
199
  output_str = response.content.decode('utf-8')
187
200
  sids = re.findall(r'"sid":"(.*?)"', output_str)
188
201
  second_sid = sids[1] if len(sids) >= 2 else None
189
202
 
190
- if voice_name and second_sid:
203
+ if voice and voice_name and second_sid:
191
204
  threading.Thread(
192
205
  target=self.download_audio_threaded,
193
206
  args=(voice_name, second_sid, output_file)
@@ -204,65 +217,74 @@ class PiAI(Provider):
204
217
  self.last_response.update(resp)
205
218
  yield parsed_data if raw else resp
206
219
  except json.JSONDecodeError:
207
- if self.logger:
208
- self.logger.warning("Failed to parse JSON from stream")
209
220
  continue
210
221
 
211
222
  self.conversation.update_chat_history(
212
223
  prompt, self.get_message(self.last_response)
213
224
  )
214
225
 
215
- def for_non_stream():
216
- for _ in for_stream():
226
+ if stream:
227
+ return process_stream()
228
+ else:
229
+ # For non-stream, collect all responses and return the final one
230
+ for res in process_stream():
217
231
  pass
218
232
  return self.last_response
219
233
 
220
- return for_stream() if stream else for_non_stream()
221
-
222
234
  def chat(
223
235
  self,
224
236
  prompt: str,
225
- voice_name: Optional[str] = None,
226
237
  stream: bool = False,
227
238
  optimizer: str = None,
228
239
  conversationally: bool = False,
229
- output_file: str = "PiAi.mp3"
240
+ voice: bool = None,
241
+ voice_name: str = None,
242
+ output_file: str = None
230
243
  ) -> str:
231
244
  """
232
245
  Generates a response based on the provided prompt.
246
+
247
+ Args:
248
+ prompt (str): The prompt to send
249
+ stream (bool): Whether to stream the response
250
+ optimizer (str): Prompt optimizer to use
251
+ conversationally (bool): Use conversation context
252
+ voice (bool): Override default voice setting
253
+ voice_name (str): Override default voice name
254
+ output_file (str): Override default output file path
233
255
  """
234
- if self.logger:
235
- self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
256
+ # Use instance defaults if not specified
257
+ voice = self.voice_enabled if voice is None else voice
258
+ voice_name = self.voice_name if voice_name is None else voice_name
259
+ output_file = self.output_file if output_file is None else output_file
236
260
 
237
- if voice_name and voice_name not in self.AVAILABLE_VOICES:
238
- if self.logger:
239
- self.logger.error(f"Invalid voice requested: {voice_name}")
240
- raise ValueError(f"Voice '{voice_name}' not one of [{', '.join(self.AVAILABLE_VOICES.keys())}]")
241
-
242
- def for_stream():
243
- for response in self.ask(
244
- prompt,
245
- voice_name,
246
- True,
247
- optimizer=optimizer,
248
- conversationally=conversationally,
249
- output_file=output_file
250
- ):
251
- yield self.get_message(response).encode('utf-8').decode('utf-8')
261
+ if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
262
+ raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
252
263
 
253
- def for_non_stream():
254
- return self.get_message(
255
- self.ask(
264
+ if stream:
265
+ def stream_generator():
266
+ for response in self.ask(
256
267
  prompt,
257
- voice_name,
258
- False,
268
+ stream=True,
259
269
  optimizer=optimizer,
260
270
  conversationally=conversationally,
271
+ voice=voice,
272
+ voice_name=voice_name,
261
273
  output_file=output_file
262
- )
263
- ).encode('utf-8').decode('utf-8')
264
-
265
- return for_stream() if stream else for_non_stream()
274
+ ):
275
+ yield self.get_message(response).encode('utf-8').decode('utf-8')
276
+ return stream_generator()
277
+ else:
278
+ response = self.ask(
279
+ prompt,
280
+ stream=False,
281
+ optimizer=optimizer,
282
+ conversationally=conversationally,
283
+ voice=voice,
284
+ voice_name=voice_name,
285
+ output_file=output_file
286
+ )
287
+ return self.get_message(response)
266
288
 
267
289
  def get_message(self, response: dict) -> str:
268
290
  """Retrieves message only from response"""
@@ -271,9 +293,6 @@ class PiAI(Provider):
271
293
 
272
294
  def download_audio_threaded(self, voice_name: str, second_sid: str, output_file: str) -> None:
273
295
  """Downloads audio in a separate thread."""
274
- if self.logger:
275
- self.logger.debug(f"Starting audio download - Voice: {voice_name}, SID: {second_sid}")
276
-
277
296
  params = {
278
297
  'mode': 'eager',
279
298
  'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
@@ -290,8 +309,6 @@ class PiAI(Provider):
290
309
  )
291
310
 
292
311
  if not audio_response.ok:
293
- if self.logger:
294
- self.logger.error(f"Audio download failed. Status code: {audio_response.status_code}")
295
312
  return
296
313
 
297
314
  audio_response.raise_for_status()
@@ -299,16 +316,12 @@ class PiAI(Provider):
299
316
  with open(output_file, "wb") as file:
300
317
  file.write(audio_response.content)
301
318
 
302
- if self.logger:
303
- self.logger.info(f"Audio file successfully downloaded to: {output_file}")
304
-
305
- except requests.exceptions.RequestException as e:
306
- if self.logger:
307
- self.logger.error(f"Audio download failed: {str(e)}")
319
+ except requests.exceptions.RequestException:
320
+ pass
308
321
 
309
322
  if __name__ == '__main__':
310
323
  from rich import print
311
- ai = PiAI(logging=True)
324
+ ai = PiAI()
312
325
  response = ai.chat(input(">>> "), stream=True)
313
326
  for chunk in response:
314
327
  print(chunk, end="", flush=True)