webscout-7.3-py3-none-any.whl → webscout-7.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/Provider/AISEARCH/__init__.py +4 -3
- webscout/Provider/AISEARCH/genspark_search.py +208 -0
- webscout/Provider/AllenAI.py +282 -0
- webscout/Provider/Deepinfra.py +43 -44
- webscout/Provider/ElectronHub.py +634 -0
- webscout/Provider/Glider.py +7 -41
- webscout/Provider/HeckAI.py +200 -0
- webscout/Provider/Jadve.py +49 -63
- webscout/Provider/PI.py +106 -93
- webscout/Provider/Perplexitylabs.py +395 -0
- webscout/Provider/QwenLM.py +7 -61
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TextPollinationsAI.py +3 -2
- webscout/Provider/TwoAI.py +200 -0
- webscout/Provider/Venice.py +200 -0
- webscout/Provider/WiseCat.py +1 -18
- webscout/Provider/__init__.py +12 -0
- webscout/Provider/akashgpt.py +312 -0
- webscout/Provider/chatglm.py +5 -5
- webscout/Provider/freeaichat.py +251 -221
- webscout/Provider/koala.py +9 -1
- webscout/Provider/yep.py +4 -24
- webscout/version.py +1 -1
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/METADATA +44 -49
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/RECORD +32 -21
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/LICENSE.md +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/WHEEL +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/entry_points.txt +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/top_level.txt +0 -0
webscout/Provider/Perplexitylabs.py
ADDED
@@ -0,0 +1,395 @@
+import ssl
+import json
+import time
+import socket
+import random
+from threading import Thread, Event
+from curl_cffi import requests
+from websocket import WebSocketApp
+from typing import Dict, Any, Union, Generator, List, Optional
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+
+class PerplexityLabs(Provider):
+    """
+    A client for interacting with the Perplexity AI Labs API.
+    """
+
+    AVAILABLE_MODELS = [
+        "r1-1776",
+        "sonar-pro",
+        "sonar",
+        "sonar-reasoning-pro",
+        "sonar-reasoning"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "r1-1776",
+        connection_timeout: float = 10.0,
+        max_retries: int = 3,
+    ):
+        """
+        Initialize the Perplexity client.
+
+        Args:
+            is_conversation: Whether to maintain conversation context
+            max_tokens: Maximum token limit for responses
+            timeout: Response timeout in seconds
+            intro: Conversation intro/system prompt
+            filepath: Path for conversation history persistence
+            update_file: Whether to update the conversation file
+            proxies: Optional proxy configuration
+            history_offset: History truncation limit
+            act: Persona to use for responses
+            model: Default model to use
+            connection_timeout: Maximum time to wait for connection
+            max_retries: Number of connection retry attempts
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.model = model
+        self.connection_timeout = connection_timeout
+        self.timeout = timeout
+        self.max_retries = max_retries
+        self.connected = Event()
+        self.last_answer = None
+
+        # Initialize session with headers using LitAgent user agent
+        self.session = requests.Session(headers={
+            'User-Agent': LitAgent().random(),
+            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'max-age=0',
+            'dnt': '1',
+            'priority': 'u=0, i',
+            'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'document',
+            'sec-fetch-mode': 'navigate',
+            'sec-fetch-site': 'same-origin',
+            'sec-fetch-user': '?1',
+            'upgrade-insecure-requests': '1',
+        })
+
+        # Apply proxies if provided
+        self.session.proxies.update(proxies)
+
+        # Set up conversation handling
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Initialize connection
+        self._initialize_connection()
+
+    def _initialize_connection(self) -> None:
+        """Initialize the connection to Perplexity with retries"""
+        for attempt in range(1, self.max_retries + 1):
+            try:
+                # Get a session ID via polling
+                self.timestamp = format(random.getrandbits(32), '08x')
+                poll_url = f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}'
+
+                response = self.session.get(poll_url)
+                if response.status_code != 200:
+                    if attempt == self.max_retries:
+                        raise ConnectionError(f"Failed to get session ID: HTTP {response.status_code}")
+                    continue
+
+                # Extract the session ID
+                try:
+                    self.sid = json.loads(response.text[1:])['sid']
+                except (json.JSONDecodeError, KeyError) as e:
+                    if attempt == self.max_retries:
+                        raise ConnectionError(f"Failed to parse session ID: {e}")
+                    continue
+
+                # Authenticate the session
+                auth_url = f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}'
+                auth_response = self.session.post(auth_url, data='40{"jwt":"anonymous-ask-user"}')
+
+                if auth_response.status_code != 200 or auth_response.text != 'OK':
+                    if attempt == self.max_retries:
+                        raise ConnectionError("Authentication failed")
+                    continue
+
+                # Setup SSL socket
+                context = ssl.create_default_context()
+                context.minimum_version = ssl.TLSVersion.TLSv1_3
+                try:
+                    self.sock = context.wrap_socket(
+                        socket.create_connection(('www.perplexity.ai', 443), timeout=self.connection_timeout),
+                        server_hostname='www.perplexity.ai'
+                    )
+                except (socket.timeout, socket.error, ssl.SSLError) as e:
+                    if attempt == self.max_retries:
+                        raise ConnectionError(f"Socket connection failed: {e}")
+                    continue
+
+                # Setup WebSocket
+                ws_url = f'wss://www.perplexity.ai/socket.io/?EIO=4&transport=websocket&sid={self.sid}'
+                cookies = '; '.join([f'{key}={value}' for key, value in self.session.cookies.get_dict().items()])
+
+                self.connected.clear()
+                self.ws = WebSocketApp(
+                    url=ws_url,
+                    header={'User-Agent': self.session.headers['User-Agent']},
+                    cookie=cookies,
+                    on_open=self._on_open,
+                    on_message=self._on_message,
+                    on_error=self._on_error,
+                    on_close=self._on_close,
+                    socket=self.sock
+                )
+
+                # Start WebSocket in a thread
+                self.ws_thread = Thread(target=self.ws.run_forever, daemon=True)
+                self.ws_thread.start()
+
+                # Wait for connection to be established
+                if self.connected.wait(timeout=self.connection_timeout):
+                    return
+
+            except Exception as e:
+                if attempt == self.max_retries:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to connect: {e}")
+
+            # If we get here, the attempt failed, wait before retrying
+            if attempt < self.max_retries:
+                retry_delay = 2 ** attempt  # Exponential backoff
+                time.sleep(retry_delay)
+
+        raise exceptions.FailedToGenerateResponseError("Failed to connect to Perplexity after multiple attempts")
+
+    def _on_open(self, ws):
+        """Handle websocket open event"""
+        ws.send('2probe')
+        ws.send('5')
+
+    def _on_close(self, ws, close_status_code, close_msg):
+        """Handle websocket close event"""
+        self.connected.clear()
+
+    def _on_message(self, ws, message):
+        """Handle websocket message events"""
+        if message == '2':
+            ws.send('3')
+
+        elif message == '3probe':
+            self.connected.set()
+
+        elif message.startswith('40'):
+            self.connected.set()
+
+        elif message.startswith('42'):
+            try:
+                response = json.loads(message[2:])[1]
+                if 'final' in response or 'partial' in response:
+                    self.last_answer = response
+            except (json.JSONDecodeError, IndexError):
+                pass
+
+    def _on_error(self, ws, error):
+        """Handle websocket error events"""
+        self.connected.clear()
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        model: str = None
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Send a query to Perplexity AI and get a response.
+
+        Args:
+            prompt: The question to ask
+            stream: Whether to stream the response
+            raw: Return raw response format
+            optimizer: Optimizer function to apply to prompt
+            conversationally: Use conversation context
+            model: Override the model to use
+
+        Returns:
+            If stream=True: Generator yielding response updates
+            If stream=False: Final response dictionary
+        """
+        # Check if connection is still active and reconnect if needed
+        if not self.connected.is_set():
+            self._initialize_connection()
+
+        # Use specified model or default
+        use_model = model or self.model
+        if use_model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {use_model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+
+        # Process prompt with conversation and optimizers
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        self.last_answer = None
+
+        # Send the query through websocket
+        payload = json.dumps([
+            'perplexity_labs',
+            {
+                'messages': [{'role': 'user', 'content': conversation_prompt}],
+                'model': use_model,
+                'source': 'default',
+                'version': '2.18',
+            }
+        ])
+        self.ws.send('42' + payload)
+
+        def for_stream():
+            """Handle streaming responses"""
+            last_seen = None
+            start_time = time.time()
+            streaming_text = ""
+
+            while True:
+                # Check for timeout
+                if time.time() - start_time > self.timeout:
+                    raise exceptions.FailedToGenerateResponseError("Response stream timed out")
+
+                # If we have a new response different from what we've seen
+                if self.last_answer != last_seen:
+                    last_seen = self.last_answer
+                    if last_seen is not None:
+                        if 'output' in last_seen:
+                            current_output = last_seen['output']
+                            # For delta output in streaming
+                            delta = current_output[len(streaming_text):]
+                            streaming_text = current_output
+                            resp = dict(text=delta)
+                            yield delta if raw else resp
+
+                # If we have the final response, add to history and return
+                if self.last_answer and self.last_answer.get('final', False):
+                    answer = self.last_answer
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    return
+
+                time.sleep(0.01)
+
+        def for_non_stream():
+            """Handle non-streaming responses"""
+            start_time = time.time()
+
+            while True:
+                # Check for successful response
+                if self.last_answer and self.last_answer.get('final', False):
+                    answer = self.last_answer
+                    self.conversation.update_chat_history(prompt, answer['output'])
+                    return answer if raw else dict(text=answer['output'])
+
+                # Check for timeout
+                if time.time() - start_time > self.timeout:
+                    raise exceptions.FailedToGenerateResponseError("Response timed out")
+
+                time.sleep(0.01)
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        model: str = None
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Send a query and get just the text response.
+
+        Args:
+            prompt: The question to ask
+            stream: Whether to stream the response
+            optimizer: Optimizer function to apply to prompt
+            conversationally: Use conversation context
+            model: Override the model to use
+
+        Returns:
+            If stream=True: Generator yielding text chunks
+            If stream=False: Complete response text
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt,
+                stream=True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                model=model
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    model=model
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Extract text from response dictionary"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Example usage
+    ai = PerplexityLabs(timeout=60, model="r1-1776")
+
+    for chunk in ai.chat("Explain the concept of neural networks", stream=True):
+        print(chunk, end="", flush=True)
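The class above drives the Socket.IO handshake (Engine.IO protocol v4) by hand, which is where the opaque frame prefixes come from: '0' opens a polling session, '40' is the Socket.IO CONNECT packet, '2'/'3' are ping/pong, and '42' wraps an event as a JSON array. A minimal sketch of the same exchange, using plain requests in place of curl_cffi (illustrative only; whether the server accepts this bare sequence outside the full client is an assumption):

import json
import requests  # assumption: plain requests stands in for curl_cffi here

BASE = "https://www.perplexity.ai/socket.io/?EIO=4&transport=polling"

with requests.Session() as s:
    # 1. One polling GET returns an Engine.IO "open" packet: '0{"sid": ...}'
    open_packet = s.get(BASE, timeout=10).text
    sid = json.loads(open_packet[1:])["sid"]  # drop the leading '0' type byte

    # 2. POST a Socket.IO CONNECT frame ('40') carrying the anonymous JWT;
    #    the provider checks for a bare 'OK' reply
    s.post(f"{BASE}&sid={sid}", data='40{"jwt":"anonymous-ask-user"}', timeout=10)

# 3. The provider then reconnects over wss:// with the same sid and sends each
#    query as a '42' event frame, e.g.:
frame = "42" + json.dumps([
    "perplexity_labs",
    {"messages": [{"role": "user", "content": "hi"}], "model": "sonar",
     "source": "default", "version": "2.18"},
])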
webscout/Provider/QwenLM.py
CHANGED
@@ -1,4 +1,3 @@
-
 import requests
 import json
 from typing import Any, Dict, Generator, Optional
@@ -11,10 +10,6 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
-# Import logging tools from our internal modules
-from webscout.Litlogger import Logger, LogFormat
-from webscout import LitAgent as Lit
-
 class QwenLM(Provider):
     """
     A class to interact with the QwenLM API
@@ -24,7 +19,7 @@ class QwenLM(Provider):
         "qwen-max-latest",
         "qwen-plus-latest",
         "qwen2.5-14b-instruct-1m",
-        "qwq-32b
+        "qwq-32b",
         "qwen2.5-coder-32b-instruct",
         "qwen-turbo-latest",
         "qwen2.5-72b-instruct",
@@ -45,25 +40,14 @@ class QwenLM(Provider):
         history_offset: int = 10250,
         act: Optional[str] = None,
         model: str = "qwen-plus-latest",
-        system_prompt: str = "You are a helpful AI assistant."
-        logging: bool = False  # New parameter to enable logging
+        system_prompt: str = "You are a helpful AI assistant."
     ):
-        """Initializes the QwenLM API client
+        """Initializes the QwenLM API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )
 
-        # Setup logger if logging is enabled
-        self.logger = Logger(
-            name="QwenLM",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing QwenLM with model: {model}")
-
         self.session = cloudscraper.create_scraper()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -91,7 +75,7 @@ class QwenLM(Provider):
         if self.chat_type != "t2t":
             AVAILABLE_MODELS = [
                 'qwen-plus-latest', 'qvq-72b-preview',
-                'qvq-32b
+                'qvq-32b', 'qwen-turbo-latest',
                 'qwen-max-latest'
             ]
 
@@ -113,9 +97,6 @@ class QwenLM(Provider):
         )
         self.conversation.history_offset = history_offset
 
-        if self.logger:
-            self.logger.info("QwenLM initialized successfully")
-
     def _load_cookies(self) -> tuple[str, str]:
         """Load cookies from a JSON file and build a cookie header string."""
         try:
@@ -128,18 +109,12 @@ class QwenLM(Provider):
                 (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
                 "",
             )
-            if self.logger:
-                self.logger.debug("Cookies loaded successfully")
             return cookie_string, token
         except FileNotFoundError:
-            if self.logger:
-                self.logger.error("cookies.json file not found!")
             raise exceptions.InvalidAuthenticationError(
                 "Error: cookies.json file not found!"
             )
         except json.JSONDecodeError:
-            if self.logger:
-                self.logger.error("Invalid JSON format in cookies.json!")
             raise exceptions.InvalidAuthenticationError(
                 "Error: Invalid JSON format in cookies.json!"
             )
@@ -152,10 +127,7 @@ class QwenLM(Provider):
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
-        """Chat with AI
-        if self.logger:
-            self.logger.debug(f"Processing ask() request. Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+        """Chat with AI."""
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -163,11 +135,7 @@ class QwenLM(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer: {optimizer}")
             raise Exception(
                 f"Optimizer is not one of {list(self.__available_optimizers)}"
             )
@@ -184,15 +152,10 @@ class QwenLM(Provider):
         }
 
         def for_stream() -> Generator[Dict[str, Any], None, None]:
-            if self.logger:
-                self.logger.debug("Sending streaming request to QwenLM API")
-
             response = self.session.post(
                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
             )
             if not response.ok:
-                if self.logger:
-                    self.logger.error(f"API request failed - Status: {response.status_code}, Reason: {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
@@ -202,8 +165,6 @@ class QwenLM(Provider):
                 if line and line.startswith("data: "):
                     data = line[6:]
                     if data == "[DONE]":
-                        if self.logger:
-                            self.logger.debug("Stream finished with [DONE] marker")
                         break
                     try:
                         json_data = json.loads(data)
@@ -225,26 +186,18 @@ class QwenLM(Provider):
                             delta = new_content[len(cumulative_text):]
                             cumulative_text = new_content
                             if delta:
-                                if self.logger:
-                                    self.logger.debug(f"Yielding delta: {delta}")
                                 yield delta if raw else {"text": delta}
                     except json.JSONDecodeError:
-                        if self.logger:
-                            self.logger.error("JSON decode error during streaming")
                         continue
             self.last_response.update(dict(text=cumulative_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
            )
-            if self.logger:
-                self.logger.debug("Finished processing stream response")
 
        def for_non_stream() -> Dict[str, Any]:
            """
            Handles non-streaming responses by aggregating all streamed chunks into a single string.
            """
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
 
            # Initialize an empty string to accumulate the full response
            full_response = ""
@@ -257,7 +210,6 @@ class QwenLM(Provider):
                elif isinstance(response, str):  # If the response is a string, directly append it
                    full_response += response
            except Exception as e:
-                self.logger.error(f"Error processing response: {str(e)}")
                raise
 
            # Ensure last_response is updated with the aggregated text
@@ -266,9 +218,6 @@ class QwenLM(Provider):
            # Update conversation history with the final response
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
 
-            if self.logger:
-                self.logger.debug(f"Non-streaming response: {full_response}")
-
            return {"text": full_response}  # Return the dictionary containing the full response
 
        return for_stream() if stream else for_non_stream()
@@ -281,9 +230,7 @@ class QwenLM(Provider):
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
-        """Generate response string from chat
-        if self.logger:
-            self.logger.debug(f"Processing chat() request. Prompt: {prompt[:50]}...")
+        """Generate response string from chat."""
 
        def for_stream() -> Generator[str, None, None]:
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
@@ -302,8 +249,7 @@ class QwenLM(Provider):
 
 if __name__ == "__main__":
     from rich import print
-
-    ai = QwenLM(cookies_path="cookies.json", logging=False)
+    ai = QwenLM(cookies_path="cookies.json")
     response = ai.chat(input(">>> "), stream=False)
     ai.chat_type = "search"  # search - used WEB, t2t - chatbot, t2i - image_gen
     print(response)
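QwenLM's _load_cookies() (unchanged above apart from the logger removal) iterates a JSON array of objects with "name"/"value" keys, joins them into a Cookie header string, and pulls out the "token" cookie. A hypothetical cookies.json that would satisfy it, sketched in Python (all values, and any cookie name other than "token", are placeholders):

import json

# Hypothetical cookies.json contents; only the "token" entry is required by
# _load_cookies(), other cookies are simply passed through in the header.
cookies = [
    {"name": "token", "value": "<your-token-cookie>"},
    {"name": "some_other_cookie", "value": "<placeholder>"},
]
with open("cookies.json", "w") as f:
    json.dump(cookies, f, indent=2)

# What the provider derives from the file:
cookie_string = "; ".join(f"{c['name']}={c['value']}" for c in cookies)
token = next((c["value"] for c in cookies if c["name"] == "token"), "")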
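Both new providers stream the same way: the upstream service re-sends the full text generated so far, and the client slices off the suffix it has not yet emitted (PerplexityLabs' for_stream and QwenLM's streaming loop both do delta = new_text[len(seen_text):]). The idea in isolation, with made-up snapshot data:

# Snapshot-to-delta streaming: each server message is the full text so far;
# slicing at the previous length yields only the newly generated tail.
snapshots = ["Neural", "Neural net", "Neural networks learn", "Neural networks learn from data."]

cumulative = ""
for snapshot in snapshots:
    delta = snapshot[len(cumulative):]  # text not yet emitted
    cumulative = snapshot
    if delta:
        print(delta, end="", flush=True)  # prints the full sentence exactly once
print()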
webscout/Provider/TTI/piclumen/__init__.py
ADDED
@@ -0,0 +1,23 @@
+"""PiclumenImager Provider Package - Your go-to for high-quality AI art! 🎨
+
+Examples:
+    >>> # Synchronous usage
+    >>> from webscout import PiclumenImager
+    >>> provider = PiclumenImager()
+    >>> images = provider.generate("A cool underwater creature")
+    >>> provider.save(images, dir="my_images")
+    >>>
+    >>> # Asynchronous usage
+    >>> import asyncio
+    >>> from webscout import AsyncPiclumenImager
+    >>> async def main():
+    ...     provider = AsyncPiclumenImager()
+    ...     images = await provider.generate("A cool cyberpunk city")
+    ...     await provider.save(images, dir="my_images")
+    >>> asyncio.run(main())
+"""
+
+from .sync_piclumen import PiclumenImager
+from .async_piclumen import AsyncPiclumenImager
+
+__all__ = ["PiclumenImager", "AsyncPiclumenImager"]