webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
```diff
@@ -321,36 +321,92 @@ class SciraChat(OpenAICompatibleProvider):
             messages=[{"role": "user", "content": "Hello!"}]
         )
     """
-    [27 removed lines, each truncated to at most one character by the diff renderer]
+    # Model mapping: actual model names to Scira API format
+    MODEL_MAPPING = {
+        "grok-3-mini": "scira-default",
+        "grok-3-mini-fast": "scira-x-fast-mini",
+        "grok-3-fast": "scira-x-fast",
+        "gpt-4.1-nano": "scira-nano",
+        "grok-3": "scira-grok-3",
+        "grok-4": "scira-grok-4",
+        "grok-2-vision-1212": "scira-vision",
+        "grok-2-latest": "scira-g2",
+        "gpt-4o-mini": "scira-4o-mini",
+        "o4-mini-2025-04-16": "scira-o4-mini",
+        "o3": "scira-o3",
+        "qwen/qwen3-32b": "scira-qwen-32b",
+        "qwen3-30b-a3b": "scira-qwen-30b",
+        "deepseek-v3-0324": "scira-deepseek-v3",
+        "claude-3-5-haiku-20241022": "scira-haiku",
+        "mistral-small-latest": "scira-mistral",
+        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
+        "gemini-2.5-flash": "scira-google",
+        "gemini-2.5-pro": "scira-google-pro",
+        "claude-sonnet-4-20250514": "scira-anthropic",
+        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
+        "claude-4-opus-20250514": "scira-opus",
+        "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
+        "kimi-k2-instruct": "scira-kimi-k2",
+        "scira-kimi-k2": "kimi-k2-instruct",
     }
+    # Reverse mapping: Scira format to actual model names
+    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    # Add special cases for aliases and duplicate mappings
+    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
+    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
+    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
+    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
+    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
+    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
+    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
+    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
+    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
+    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
+    # Available models list (actual model names + scira aliases)
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
     # Optional: pretty display names for UI (reverse mapping)
-    MODEL_DISPLAY_NAMES = {v: k for k, v in…
+    MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_MAPPING.items()}
+
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model name to its Scira API format.
+
+        Args:
+            model: Either an actual model name or a Scira alias
+
+        Returns:
+            The Scira API format model name
+
+        Raises:
+            ValueError: If the model is not supported
+        """
+        # If it's already a Scira format, return as-is
+        if model in cls.SCIRA_TO_MODEL:
+            return model
+        # If it's an actual model name, convert to Scira format
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+        # Model not found
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model display names or internal keys to ones supported by SciraChat.
+        Args:
+            model: Model name or alias to convert
+        Returns:
+            SciraChat model name
+        """
+        # Use the new _resolve_model logic
+        try:
+            return self._resolve_model(model)
+        except Exception as e:
+            print(f"Warning: {e} Using 'scira-default' instead.")
+            return "scira-default"
 
     def __init__(
         self,
```
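The new class-level tables make model resolution bidirectional: `SCIRA_TO_MODEL` is derived by inverting `MODEL_MAPPING`, then patched by hand because the table intentionally contains duplicate and reciprocal pairs (for example both `kimi-k2-instruct → scira-kimi-k2` and its reverse), which a plain dict inversion would silently collapse. A minimal standalone sketch of the resolution scheme — the mapping here is a three-entry excerpt, not the full table:

```python
# Standalone sketch of the alias-resolution logic above (excerpted mapping).
MODEL_MAPPING = {
    "grok-3-mini": "scira-default",
    "gpt-4o-mini": "scira-4o-mini",
    "gemini-2.5-flash": "scira-google",
}
SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}

def resolve_model(model: str) -> str:
    """Accept an actual model name or a Scira alias; return the Scira alias."""
    if model in SCIRA_TO_MODEL:       # already an alias: pass through
        return model
    if model in MODEL_MAPPING:        # actual name: translate
        return MODEL_MAPPING[model]
    raise ValueError(f"Invalid model: {model}")

assert resolve_model("gpt-4o-mini") == "scira-4o-mini"
assert resolve_model("scira-google") == "scira-google"
```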
```diff
@@ -447,25 +503,6 @@ class SciraChat(OpenAICompatibleProvider):
             print(f"Warning: Error formatting text: {e}")
             return text
 
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model display names or internal keys to ones supported by SciraChat.
-
-        Args:
-            model: Model name or alias to convert
-
-        Returns:
-            SciraChat model name
-        """
-        # If model is a display name (alias), map to internal key
-        if model in self.MODEL_NAME_MAP:
-            return self.MODEL_NAME_MAP[model]
-        # If model is already an internal key, return it if valid
-        if model in self.MODEL_DISPLAY_NAMES:
-            return model
-        # Default to scira-default if model not found
-        print(f"Warning: Unknown model '{model}'. Using 'scira-default' instead.")
-        return "scira-default"
 
     @property
     def models(self):
```
```diff
@@ -478,9 +515,9 @@ class SciraChat(OpenAICompatibleProvider):
 if __name__ == "__main__":
     ai = SciraChat()
     response = ai.chat.completions.create(
-        model="…
+        model="grok-3-mini-fast-latest",
         messages=[
-            {"role": "user", "content": "who…
+            {"role": "user", "content": "who are u?"}
         ],
         stream=True
     )
```
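For context, a hypothetical way to consume the stream the `__main__` block creates. This assumes the chunks follow the OpenAI streaming shape (`choices[0].delta.content`) that `OpenAICompatibleProvider` mimics; the import path is an inference from the file list above, not confirmed by this diff:

```python
from webscout.Provider.OPENAI.scirachat import SciraChat  # path inferred

ai = SciraChat()
stream = ai.chat.completions.create(
    model="grok-3-mini",  # any entry from AVAILABLE_MODELS, actual name or alias
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
)
for chunk in stream:
    # Assumed OpenAI-style delta shape; adjust if the provider differs.
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```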
```diff
@@ -276,28 +276,33 @@ class TextPollinations(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
+        "deepseek",
+        "deepseek-reasoning",
+        "gemma-roblox",
+        "grok",
+        "llama-fast-roblox",
+        "llama-roblox",
+        "llamascout",
+        "mistral",
+        "mistral-nemo-roblox",
+        "mistral-roblox",
         "openai",
+        "openai-audio",
         "openai-fast",
         "openai-large",
+        "openai-reasoning",
         "openai-roblox",
+        "phi",
         "qwen-coder",
-        "…
-        "…
-        "mistral",
-        "unity",
-        "mirexa",
-        "midijourney",
-        "rtist",
-        "searchgpt",
+        "bidara",
+        "elixposearch",
         "evil",
-        "deepseek-reasoning",
-        "phi",
-        "hormoz",
         "hypnosis-tracy",
-        "…
+        "midijourney",
+        "mirexa",
+        "rtist",
         "sur",
-        "…
-        "openai-audio",
+        "unity",
     ]
 
     def __init__(
```
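The list is now kept in rough alphabetical order, which makes the additions (`bidara`, `elixposearch`, the Roblox-tuned variants) and removals (`hormoz`, `searchgpt`) easier to spot in review. A small illustrative guard — not webscout code — that turns an unknown model id into an actionable error, with names excerpted from the list above:

```python
import difflib

# AVAILABLE_MODELS excerpted from the diff above; validate_model is illustrative.
AVAILABLE_MODELS = ["deepseek", "deepseek-reasoning", "mistral", "openai",
                    "openai-audio", "openai-fast", "openai-reasoning",
                    "phi", "qwen-coder", "sur", "unity"]

def validate_model(name: str) -> str:
    if name in AVAILABLE_MODELS:
        return name
    close = difflib.get_close_matches(name, AVAILABLE_MODELS, n=1)
    hint = f" Did you mean {close[0]!r}?" if close else ""
    raise ValueError(f"Unknown model {name!r}.{hint}")

print(validate_model("openai-audio"))   # openai-audio
# validate_model("openai-reason")       # ValueError with a suggestion
```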
```diff
@@ -1,19 +1,17 @@
-import ssl
 import json
 import time
-import socket
 import random
-from threading import …
+from threading import Event
 from curl_cffi import requests
-from …
-from typing import Dict, Any, Union, Generator, List, Optional
+from typing import Dict, Any, Union, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
+
+API_URL = "https://www.perplexity.ai/socket.io/"
 
 class PerplexityLabs(Provider):
     """
```
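The import changes telegraph the transport rewrite that follows: the raw `ssl`/`socket` WebSocket plumbing is gone, and the provider now speaks Socket.IO over plain HTTP long-polling via `curl_cffi`. A minimal standalone sketch of that Engine.IO v4 polling handshake, using the same endpoint, headers, and packet payloads as the hunks below (a reproduction for illustration, not the provider's exact code):

```python
import json
import random
from curl_cffi import requests

API_URL = "https://www.perplexity.ai/socket.io/"
HEADERS = {
    "Origin": "https://labs.perplexity.ai",
    "Referer": "https://labs.perplexity.ai/",
}

session = requests.Session(impersonate="chrome")
t = format(random.getrandbits(32), "08x")

# 1. Open an Engine.IO session: the body is "0{...}" with the sid inside.
open_resp = session.get(f"{API_URL}?EIO=4&transport=polling&t={t}", headers=HEADERS)
sid = json.loads(open_resp.text[1:])["sid"]

# 2. Connect to the Socket.IO namespace ("40" packet) with the anonymous JWT.
auth_url = f"{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}"
session.post(auth_url, data='40{"jwt":"anonymous-ask-user"}', headers=HEADERS)

# 3. Drain one GET to finish the handshake before posting "42" event packets.
session.get(auth_url, headers=HEADERS)
```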
```diff
@@ -70,26 +68,16 @@ class PerplexityLabs(Provider):
         self.connected = Event()
         self.last_answer = None
 
-        # Initialize session with headers
-        self.…
-        [4 lines truncated by the diff renderer]
-            'dnt': '1',
-            'priority': 'u=0, i',
-            'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'document',
-            'sec-fetch-mode': 'navigate',
-            'sec-fetch-site': 'same-origin',
-            'sec-fetch-user': '?1',
-            'upgrade-insecure-requests': '1',
-        })
+        # Initialize session with headers matching the working example
+        self.headers = {
+            "Origin": "https://labs.perplexity.ai",
+            "Referer": "https://labs.perplexity.ai/",
+        }
+        self.session = requests.Session(impersonate="chrome")
 
         # Apply proxies if provided
-        [line truncated by the diff renderer]
+        if proxies:
+            self.session.proxies.update(proxies)
 
         # Set up conversation handling
         self.is_conversation = is_conversation
```
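Note the design shift here: instead of hand-maintaining a browser fingerprint header by header (`dnt`, `sec-ch-ua`, `sec-fetch-*`), the session delegates fingerprinting to `curl_cffi`'s Chrome impersonation and keeps only the two headers the origin actually checks. The same pattern in isolation, with an optional proxy (the proxy URL is hypothetical):

```python
from curl_cffi import requests

# One impersonated session; curl_cffi supplies the TLS and header fingerprint,
# so only Origin/Referer need to be passed per request.
session = requests.Session(impersonate="chrome")
proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}  # hypothetical
session.proxies.update(proxies)
```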
```diff
@@ -117,14 +105,14 @@ class PerplexityLabs(Provider):
         self._initialize_connection()
 
     def _initialize_connection(self) -> None:
-        """Initialize the connection to Perplexity…
+        """Initialize the connection to Perplexity using polling approach"""
         for attempt in range(1, self.max_retries + 1):
             try:
                 # Get a session ID via polling
                 self.timestamp = format(random.getrandbits(32), '08x')
-                poll_url = f'…
+                poll_url = f'{API_URL}?EIO=4&transport=polling&t={self.timestamp}'
 
-                response = self.session.get(poll_url)
+                response = self.session.get(poll_url, headers=self.headers)
                 if response.status_code != 200:
                     if attempt == self.max_retries:
                         raise ConnectionError(f"Failed to get session ID: HTTP {response.status_code}")
```
```diff
@@ -132,57 +120,35 @@ class PerplexityLabs(Provider):
 
                 # Extract the session ID
                 try:
-                    [line truncated by the diff renderer]
+                    text = response.text
+                    if not text.startswith("0"):
+                        raise ConnectionError("Invalid response format")
+                    self.sid = json.loads(text[1:])['sid']
                 except (json.JSONDecodeError, KeyError) as e:
                     if attempt == self.max_retries:
                         raise ConnectionError(f"Failed to parse session ID: {e}")
                     continue
 
                 # Authenticate the session
-                auth_url = f'…
-                [line truncated by the diff renderer]
+                self.auth_url = f'{API_URL}?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}'
+                post_data = '40{"jwt":"anonymous-ask-user"}'
+                auth_response = self.session.post(self.auth_url, data=post_data, headers=self.headers)
 
                 if auth_response.status_code != 200 or auth_response.text != 'OK':
                     if attempt == self.max_retries:
                         raise ConnectionError("Authentication failed")
                     continue
 
-                # …
-                [2 lines truncated by the diff renderer]
-                try:
-                    self.sock = context.wrap_socket(
-                        socket.create_connection(('www.perplexity.ai', 443), timeout=self.connection_timeout),
-                        server_hostname='www.perplexity.ai'
-                    )
-                except (socket.timeout, socket.error, ssl.SSLError) as e:
+                # Get additional response to complete handshake
+                get_response = self.session.get(self.auth_url, headers=self.headers)
+                if get_response.status_code != 200:
                     if attempt == self.max_retries:
-                        raise ConnectionError(…
+                        raise ConnectionError("Failed to complete authentication handshake")
                     continue
 
-                # …
-                [3 lines truncated by the diff renderer]
-                self.connected.clear()
-                self.ws = WebSocketApp(
-                    url=ws_url,
-                    header={'User-Agent': self.session.headers['User-Agent']},
-                    cookie=cookies,
-                    on_open=self._on_open,
-                    on_message=self._on_message,
-                    on_error=self._on_error,
-                    on_close=self._on_close,
-                    socket=self.sock
-                )
-
-                # Start WebSocket in a thread
-                self.ws_thread = Thread(target=self.ws.run_forever, daemon=True)
-                self.ws_thread.start()
-
-                # Wait for connection to be established
-                if self.connected.wait(timeout=self.connection_timeout):
-                    return
+                # Connection successful - using polling instead of WebSocket
+                self.connected.set()
+                return
 
             except Exception as e:
                 if attempt == self.max_retries:
```
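The rewritten loop distinguishes Engine.IO/Socket.IO packets purely by their text prefix; the framing is standard: `0` opens a session, `2`/`3` are ping/pong, `40` acknowledges a namespace connect, and `42` carries a JSON event. A tiny illustrative dispatcher (not webscout code):

```python
import json

def classify_packet(text: str):
    """Illustrative Engine.IO/Socket.IO packet dispatch, as used above."""
    if text == "2":                           # ping: answer with "3" (pong)
        return ("ping", None)
    if text.startswith("42"):                 # event: 42["name", {...}]
        event, payload = json.loads(text[2:])
        return (event, payload)
    if text.startswith("40"):                 # namespace connect ack
        return ("connected", None)
    if text.startswith("0"):                  # handshake: 0{"sid": ...}
        return ("open", json.loads(text[1:]))
    return ("unknown", text)

assert classify_packet("2") == ("ping", None)
assert classify_packet('42["perplexity_labs",{"output":"hi"}]')[1]["output"] == "hi"
```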
```diff
@@ -195,37 +161,107 @@ class PerplexityLabs(Provider):
 
         raise exceptions.FailedToGenerateResponseError("Failed to connect to Perplexity after multiple attempts")
 
-    def …
-    """…
-    [2 lines truncated by the diff renderer]
+    def _send_query_polling(self, message_data):
+        """Send query using polling approach"""
+        payload = '42' + json.dumps(["perplexity_labs", message_data])
+        response = self.session.post(self.auth_url, data=payload, headers=self.headers, timeout=10)
+        return response.status_code == 200
 
-    def …
-    """…
-    [12 lines truncated by the diff renderer]
+    def _poll_for_response(self, timeout_seconds):
+        """Poll for response using the polling approach"""
+        start_time = time.time()
+        last_message = 0
+        full_output = ""
+
+        while True:
+            if time.time() - start_time > timeout_seconds:
+                if last_message == 0:
+                    raise exceptions.FailedToGenerateResponseError("Response timed out")
+                else:
+                    # Return partial response if we got some content
+                    yield {"text": "", "final": True, "full_output": full_output}
+                    return
 
-            elif message.startswith('42'):
             try:
-                [9 lines truncated by the diff renderer]
+                poll_response = self.session.get(self.auth_url, headers=self.headers, timeout=3)
+
+                if poll_response.status_code == 400:
+                    # Session expired, try to return what we have
+                    if full_output:
+                        yield {"text": "", "final": True, "full_output": full_output}
+                        return
+                    else:
+                        raise exceptions.FailedToGenerateResponseError("Session expired")
+
+                if poll_response.status_code != 200:
+                    time.sleep(0.5)
+                    continue
+
+                response_text = poll_response.text
+
+                # Handle heartbeat
+                if response_text == '2':
+                    try:
+                        self.session.post(self.auth_url, data='3', headers=self.headers, timeout=3)
+                    except:
+                        pass
+                    continue
+
+                # Handle data messages containing output
+                if '42[' in response_text and 'output' in response_text:
+                    try:
+                        # Find the JSON part more reliably
+                        start = response_text.find('42[')
+                        if start != -1:
+                            # Find the end of this JSON message
+                            bracket_count = 0
+                            json_start = start + 2
+                            json_end = json_start
+
+                            for j, char in enumerate(response_text[json_start:]):
+                                if char == '[':
+                                    bracket_count += 1
+                                elif char == ']':
+                                    bracket_count -= 1
+                                    if bracket_count == 0:
+                                        json_end = json_start + j + 1
+                                        break
+
+                            json_str = response_text[json_start:json_end]
+                            parsed_data = json.loads(json_str)
+
+                            if len(parsed_data) > 1 and isinstance(parsed_data[1], dict):
+                                data = parsed_data[1]
+
+                                # Handle error responses
+                                if data.get("status") == "failed":
+                                    error_message = data.get("text", "Unknown API error")
+                                    raise exceptions.FailedToGenerateResponseError(f"API Error: {error_message}")
+
+                                # Handle normal responses
+                                if "output" in data:
+                                    current_output = data["output"]
+                                    if len(current_output) > last_message:
+                                        delta = current_output[last_message:]
+                                        last_message = len(current_output)
+                                        full_output = current_output
+                                        yield {"text": delta, "final": data.get("final", False), "full_output": full_output}
+
+                                        if data.get("final", False):
+                                            return
+
+                    except (json.JSONDecodeError, IndexError, KeyError) as e:
+                        # Continue on parsing errors
+                        pass
+
+            except Exception as e:
+                # Handle timeout and other errors more gracefully
+                if "timeout" in str(e).lower():
+                    continue
+                time.sleep(0.5)
+                continue
+
+            time.sleep(0.5)
 
     def ask(
         self,
```
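The heart of `_poll_for_response` is cumulative-to-delta conversion: each `42` event carries the full output so far, and only the unseen tail is yielded. The same logic in isolation:

```python
# Cumulative-to-delta conversion as in _poll_for_response: the server resends
# the whole output each poll; only the unseen tail is yielded.
def deltas(snapshots):
    seen = 0
    for full in snapshots:
        if len(full) > seen:
            yield full[seen:]
            seen = len(full)

print(list(deltas(["Hel", "Hello", "Hello", "Hello, world"])))
# -> ['Hel', 'lo', ', world']   (the repeated snapshot yields nothing)
```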
```diff
@@ -270,67 +306,47 @@ class PerplexityLabs(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-        [line truncated by the diff renderer]
+        # Send the query using polling approach
+        message_data = {
+            "version": "2.18",
+            "source": "default",
+            "model": use_model,
+            "messages": [{"role": "user", "content": conversation_prompt}],
+        }
 
-        # Send…
-        [2 lines truncated by the diff renderer]
-            {
-                'messages': [{'role': 'user', 'content': conversation_prompt}],
-                'model': use_model,
-                'source': 'default',
-                'version': '2.18',
-            }
-        ])
-        self.ws.send('42' + payload)
+        # Send query
+        if not self._send_query_polling(message_data):
+            raise exceptions.FailedToGenerateResponseError("Failed to send query")
 
         def for_stream():
-            """Handle streaming responses"""
-            [5 lines truncated by the diff renderer]
-                # Check for timeout
-                if time.time() - start_time > self.timeout:
-                    raise exceptions.FailedToGenerateResponseError("Response stream timed out")
-
-                # If we have a new response different from what we've seen
-                if self.last_answer != last_seen:
-                    last_seen = self.last_answer
-                    if last_seen is not None:
-                        if 'output' in last_seen:
-                            current_output = last_seen['output']
-                            # For delta output in streaming
-                            delta = current_output[len(streaming_text):]
-                            streaming_text = current_output
-                            resp = dict(text=delta)
-                            yield resp if raw else resp
+            """Handle streaming responses using polling"""
+            full_text = ""
+            for response_chunk in self._poll_for_response(self.timeout):
+                if response_chunk["text"]:
+                    full_text += response_chunk["text"]
+                    yield dict(text=response_chunk["text"]) if raw else dict(text=response_chunk["text"])
 
-                            [2 lines truncated by the diff renderer]
-                            answer = self.last_answer
-                            self.conversation.update_chat_history(prompt, streaming_text)
+                if response_chunk["final"]:
+                    self.conversation.update_chat_history(prompt, full_text)
                     return
-
-                time.sleep(0.01)
 
         def for_non_stream():
-            """Handle non-streaming responses"""
-            [4 lines truncated by the diff renderer]
-            if self.last_answer and self.last_answer.get('final', False):
-                answer = self.last_answer
-                self.conversation.update_chat_history(prompt, answer['output'])
-                return answer if raw else dict(text=answer['output'])
+            """Handle non-streaming responses using polling"""
+            full_text = ""
+            for response_chunk in self._poll_for_response(self.timeout):
+                if response_chunk["text"]:
+                    full_text += response_chunk["text"]
 
-            [5 lines truncated by the diff renderer]
+                if response_chunk["final"]:
+                    self.conversation.update_chat_history(prompt, full_text)
+                    return dict(text=full_text) if raw else dict(text=full_text)
+
+            # If we get here, no final response was received
+            if full_text:
+                self.conversation.update_chat_history(prompt, full_text)
+                return dict(text=full_text) if raw else dict(text=full_text)
+            else:
+                raise exceptions.FailedToGenerateResponseError("No response received")
 
         return for_stream() if stream else for_non_stream()
 
```
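Taken together, `ask` now drives both the streaming and non-streaming paths off the one `_poll_for_response` generator. A hedged usage sketch mirroring the test harness below (the `chat(..., stream=True)` call and constructor arguments come from the harness; the import path is inferred from the file list at the top of this diff):

```python
from webscout.Provider.Perplexitylabs import PerplexityLabs  # path inferred

ai = PerplexityLabs(timeout=30, connection_timeout=5.0)
# chat(..., stream=True) yields text chunks, as in the __main__ harness below.
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)
```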
```diff
@@ -396,7 +412,7 @@ if __name__ == "__main__":
 
     for model in PerplexityLabs.AVAILABLE_MODELS:
         try:
-            test_ai = PerplexityLabs(model=model, timeout=…
+            test_ai = PerplexityLabs(model=model, timeout=30, connection_timeout=5.0)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
```
```diff
@@ -412,4 +428,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)[:80]}")
```