webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
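A pattern that repeats across the provider diffs below is the replacement of `requests`/`cloudscraper` sessions with `curl_cffi` sessions that impersonate a real browser's TLS fingerprint, paired with the shared `sanitize_stream` helper and per-provider content extractors. Here is a minimal sketch of the session half of that pattern, assuming `curl_cffi` is installed; the endpoint, payload, and `stream_post` name are placeholders for illustration, not values from the package:

```python
# Sketch of the session pattern these diffs adopt (pip install curl_cffi).
import json

from curl_cffi import CurlError
from curl_cffi.requests import Session

session = Session()
session.headers.update({"Content-Type": "application/json"})
session.proxies = {}  # assigned directly, as the diffs below do


def stream_post(url: str, payload: dict, timeout: int = 30):
    """POST with a browser TLS fingerprint and yield raw response bytes."""
    try:
        response = session.post(
            url,
            data=json.dumps(payload),
            stream=True,
            timeout=timeout,
            impersonate="chrome120",  # replay a real Chrome TLS handshake
        )
        response.raise_for_status()
        for chunk in response.iter_content(chunk_size=None):
            yield chunk
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}") from e


# Hypothetical usage (endpoint is illustrative only):
# for chunk in stream_post("https://example.com/api/chat", {"prompt": "hi"}):
#     print(chunk.decode(errors="ignore"), end="")
```

The `impersonate` argument is what distinguishes this from a plain `requests` call: curl_cffi replays the TLS handshake of the named browser build, which is presumably why these providers moved off `requests` and `cloudscraper`.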
webscout/Provider/Cloudflare.py
CHANGED

@@ -1,13 +1,15 @@
 import json
 from uuid import uuid4
-
+
+import re # Import re
+from curl_cffi import CurlError
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Union, Any, AsyncGenerator, Dict
-import cloudscraper
+from typing import Optional, Union, Any, AsyncGenerator, Dict
+from curl_cffi.requests import Session
 from webscout.litagent import LitAgent

 class Cloudflare(Provider):
@@ -96,7 +98,7 @@ class Cloudflare(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.
+        self.session = Session() # Use curl_cffi Session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://playground.ai.cloudflare.com/api/inference"
@@ -136,7 +138,7 @@ class Cloudflare(Provider):
         )

         # Initialize session and apply proxies
-        self.session = cloudscraper.create_scraper()
+        # self.session = cloudscraper.create_scraper() # Replaced above
         self.session.headers.update(self.headers)
         self.session.proxies = proxies

@@ -156,6 +158,19 @@ class Cloudflare(Provider):
         # if self.logger:
         #     self.logger.info("Cloudflare initialized successfully")

+    @staticmethod
+    def _cloudflare_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Cloudflare stream JSON objects."""
+        # Updated for the 0:"..." format
+        if isinstance(chunk, str):
+            # Use re.search to find the pattern 0:"<content>"
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -201,37 +216,69 @@ class Cloudflare(Provider):

         def for_stream():
             # if self.logger:
-            #     self.logger.debug("Sending streaming request to Cloudflare API...")
-
-
-
-
-
-
-
-
-
-
-
-
-
+            #     self.logger.debug("Sending streaming request to Cloudflare API...")
+            streaming_text = "" # Initialize outside try block
+            try:
+                response = self.session.post(
+                    self.chat_endpoint,
+                    headers=self.headers,
+                    cookies=self.cookies,
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None,
+                    to_json=False,
+                    skip_markers=None,
+                    content_extractor=self._cloudflare_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
                 )
-
-
-
-
-
-
-
-
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
+            finally:
+                # Update history after stream finishes or fails
+                self.last_response.update(dict(text=streaming_text))
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            last_response_dict = {}
             self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
             # if self.logger:
             #     self.logger.info("Streaming response completed successfully")
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                        last_response_dict = {"text": full_text} # Keep track of last dict structure
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+                        last_response_dict = {"text": full_text} # Update dict even for raw
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

-
-
-
-            return self.last_response
+            # last_response and history are updated within for_stream's finally block
+            # Return the final aggregated response dict or raw text
+            return full_text if raw else last_response_dict

         return for_stream() if stream else for_non_stream()
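The heart of this change is the new `_cloudflare_extractor`: the playground now streams frames in which text arrives as `0:"..."` chunks, and the extractor pulls the quoted content out with a regex instead of JSON parsing. A standalone sketch of that logic follows; `cloudflare_extract` is a hypothetical mirror of the method above, and the sample chunks are assumed shapes for illustration only:

```python
import re
from typing import Any, Dict, Optional, Union


def cloudflare_extract(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Mirror of _cloudflare_extractor: pull text out of 0:"..." frames."""
    if isinstance(chunk, str):
        # Capture <content> from 0:"<content>" followed by a comma or end of string
        match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
        if match:
            # Decode unicode escapes (e.g. \u00e9), then unescape quotes/backslashes
            content = match.group(1).encode().decode("unicode_escape")
            return content.replace('\\\\', '\\').replace('\\"', '"')
    return None


# Sample chunks (assumed shapes, for illustration only)
print(cloudflare_extract('0:"Hello, "'))        # -> 'Hello, '
print(cloudflare_extract('0:"caf\\u00e9"'))     # -> 'café'
print(cloudflare_extract('f:{"messageId":1}'))  # -> None (non-content frame skipped)
```

Returning `None` for non-matching frames is what lets `sanitize_stream` (called with `yield_raw_on_error=False`) silently drop metadata lines and emit only text.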
webscout/Provider/Deepinfra.py
CHANGED

@@ -1,11 +1,12 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import os
-from typing import Any, Dict, Optional, Generator, Union
+from typing import Any, Dict, Optional, Generator, Union, List

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -65,14 +66,24 @@ class DeepInfra(Provider):
         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen3-14B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]

+    @staticmethod
+    def _deepinfra_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from DeepInfra stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2049,
+        max_tokens: int = 2049,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -82,43 +93,39 @@ class DeepInfra(Provider):
         act: str = None,
         model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome"
+        browser: str = "chrome" # Note: browser fingerprinting might be less effective with impersonate
     ):
         """Initializes the DeepInfra API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
+
         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
-
-        # Initialize LitAgent for
+
+        # Initialize LitAgent (keep if needed for other headers or logic)
         self.agent = LitAgent()
-        #
+        # Fingerprint generation might be less relevant with impersonate
        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
+
+        # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-
-            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept": self.fingerprint["accept"], # Keep Accept
+            "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
             "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "
-            "
-            "
-            "
-            "Sec-Fetch-Dest": "empty",
+            "Cache-Control": "no-cache", # Keep Cache-Control
+            "Origin": "https://deepinfra.com", # Keep Origin
+            "Pragma": "no-cache", # Keep Pragma
+            "Referer": "https://deepinfra.com/", # Keep Referer
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
-            "X-Deepinfra-Source": "web-embed",
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
+            "X-Deepinfra-Source": "web-embed", # Keep custom headers
         }
-
-
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies # Assign proxies directly
         self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -147,32 +154,28 @@ class DeepInfra(Provider):
     def refresh_identity(self, browser: str = None):
         """
         Refreshes the browser identity fingerprint.
-
+
         Args:
             browser: Specific browser to use for the new fingerprint
         """
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
+
+        # Update headers with new fingerprint (only relevant ones)
         self.headers.update({
             "Accept": self.fingerprint["accept"],
             "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
         })
-
+
         # Update session headers
-
-
-
+        self.session.headers.update(self.headers) # Update only relevant headers
+
         return self.fingerprint

     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -193,61 +196,88 @@ class DeepInfra(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream
+            "stream": stream # Pass stream argument to payload
         }

         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    # headers are set on the session
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._deepinfra_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _deepinfra_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                # Update history after stream finishes or fails
+                if streaming_text:
                     self.last_response = {"text": streaming_text}
                     self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+

         def for_non_stream():
             try:
-
-
-
-
-            )
-
-
-
-
-
-
-
-
-
-
-
+                # Use curl_cffi session post with impersonate for non-streaming
+                response = self.session.post(
+                    self.url,
+                    # headers are set on the session
+                    data=json.dumps(payload),
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                response_text = response.text # Get raw text
+
+                # Use sanitize_stream to parse the non-streaming JSON response
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    # Extractor for non-stream structure
+                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
+                )
+                # Extract the single result
+                content = next(processed_stream, None)
+                content = content if isinstance(content, str) else "" # Ensure it's a string
+
+                self.last_response = {"text": content}
+                self.conversation.update_chat_history(prompt, content)
+                return self.last_response if not raw else content # Return dict or raw string
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+

         return for_stream() if stream else for_non_stream()

@@ -258,20 +288,31 @@ class DeepInfra(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
-        def
-
-
-
-
-            self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
-
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data) # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -283,7 +324,7 @@ if __name__ == "__main__":
        response_text = ""
        for chunk in response:
            response_text += chunk
-
+
        if response_text and len(response_text.strip()) > 0:
            status = "✓"
            # Clean and truncate response
@@ -294,4 +335,4 @@ if __name__ == "__main__":
            display_text = "Empty or invalid response"
        print(f"\r{model:<50} {status:<10} {display_text}")
    except Exception as e:
-        print(f"\r{model:<50} {'✗':<10} {str(e)}")
+        print(f"\r{model:<50} {'✗':<10} {str(e)}")
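DeepInfra's endpoint is OpenAI-compatible, so its stream is standard `data: {...}` SSE lines terminated by `data: [DONE]`, and `_deepinfra_extractor` simply walks `choices[0].delta.content`. Below is a self-contained sketch of what `sanitize_stream` is being configured to do here (`intro_value="data:"`, `skip_markers=["[DONE]"]`, `to_json=True`); `deepinfra_extract` is a hypothetical standalone mirror of the method, and the sample lines are hand-written rather than taken from a live request:

```python
import json
from typing import Any, Dict, Optional


def deepinfra_extract(chunk: Dict[str, Any]) -> Optional[str]:
    """Mirror of _deepinfra_extractor: choices[0].delta.content, else None."""
    return chunk.get("choices", [{}])[0].get("delta", {}).get("content")


# Hand-written sample lines imitating the OpenAI-style SSE stream (assumed shape)
sample = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]

text = ""
for line in sample:
    payload = line[len("data:"):].strip()  # strip the "data:" intro marker
    if payload == "[DONE]":                # skip the terminator marker
        continue
    piece = deepinfra_extract(json.loads(payload))
    if piece:
        text += piece
print(text)  # -> Hello
```

The non-streaming path in the diff reuses the same machinery with a different lambda extractor (`choices[0].message.content`), which is why both branches can share one parsing helper.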