webscout 8.2.3-py3-none-any.whl → 8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
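A listing like the one above can be reproduced from the public registry artifacts themselves. Below is a minimal sketch, not the registry's own tooling, that compares the file listings of two locally downloaded wheels using only the standard library; the wheel paths are assumptions (e.g. files fetched with `pip download webscout==8.2.3 --no-deps`).

```python
# Sketch: diff the file listings of two wheel archives (wheels are zip files).
# The paths below are assumed local downloads, not part of this page.
import zipfile

OLD_WHEEL = "webscout-8.2.3-py3-none-any.whl"  # assumed path
NEW_WHEEL = "webscout-8.2.5-py3-none-any.whl"  # assumed path

with zipfile.ZipFile(OLD_WHEEL) as old, zipfile.ZipFile(NEW_WHEEL) as new:
    old_files, new_files = set(old.namelist()), set(new.namelist())

for name in sorted(new_files - old_files):
    print(f"added:   {name}")
for name in sorted(old_files - new_files):
    print(f"removed: {name}")
# Files present in both archives can be extracted and compared line by line
# with difflib.unified_diff to get per-file +/- counts like those above.
```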
webscout/Provider/Hunyuan.py
CHANGED
```diff
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import os
 from typing import Any, Dict, Optional, Generator, Union
@@ -26,7 +27,7 @@ class Hunyuan(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -35,7 +36,7 @@ class Hunyuan(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "hunyuan-t1-latest",
-        browser: str = "chrome",
+        browser: str = "chrome", # Note: browser fingerprinting might be less effective with impersonate
         api_key: str = None,
         system_prompt: str = "You are a helpful assistant.",
     ):
@@ -46,28 +47,23 @@ class Hunyuan(Provider):
 
         self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
 
-        # Initialize LitAgent for
+        # Initialize LitAgent (keep if needed for other headers or logic)
         self.agent = LitAgent()
-        #
-        self.fingerprint = self.agent.generate_fingerprint(browser)
+        # Fingerprint generation might be less relevant with impersonate
+        self.fingerprint = self.agent.generate_fingerprint(browser)
 
-        # Use the fingerprint for headers
+        # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
             "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
             "Content-Type": "application/json",
-            "DNT": "1",
-            "Origin": "https://llm.hunyuan.tencent.com",
-            "Referer": "https://llm.hunyuan.tencent.com/",
-            "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "Sec-Fetch-Dest": "empty",
+            "DNT": "1", # Keep DNT
+            "Origin": "https://llm.hunyuan.tencent.com", # Keep Origin
+            "Referer": "https://llm.hunyuan.tencent.com/", # Keep Referer
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "Sec-GPC": "1",
-            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-GPC": "1", # Keep Sec-GPC
         }
 
         # Add authorization if API key is provided
@@ -77,9 +73,11 @@ class Hunyuan(Provider):
             # Default test key (may not work long-term)
             self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
         self.system_message = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -115,24 +113,20 @@ class Hunyuan(Provider):
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         self.fingerprint = self.agent.generate_fingerprint(browser)
 
-        # Update headers with new fingerprint
+        # Update headers with new fingerprint (only relevant ones)
         self.headers.update({
             "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
         })
 
         # Update session headers
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
+        self.session.headers.update(self.headers) # Update only relevant headers
 
         return self.fingerprint
 
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -140,9 +134,7 @@ class Hunyuan(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
@@ -152,7 +144,7 @@ class Hunyuan(Provider):
 
         # Payload construction
         payload = {
-            "stream": stream,
+            "stream": True, # API seems to require stream=True based on response format
             "model": self.model,
             "query_id": query_id,
             "messages": [
@@ -164,66 +156,74 @@ class Hunyuan(Provider):
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
-
-
-
-
-
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
 
-
-
-
-
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8').strip()
                             if line.startswith("data: "):
                                 json_str = line[6:]
                                 if json_str == "[DONE]":
                                     break
-
-
-
-
-
-
+                                json_data = json.loads(json_str)
+                                if 'choices' in json_data:
+                                    choice = json_data['choices'][0]
+                                    if 'delta' in choice and 'content' in choice['delta']:
+                                        content = choice['delta']['content']
+                                        if content: # Ensure content is not None or empty
                                             streaming_text += content
                                             resp = dict(text=content)
-
-
-
+                                            # Yield dict or raw string chunk
+                                            yield resp if not raw else content
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
 
-
-
+                # Update history after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
 
-            except
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+
 
         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
             try:
-
-
-
-
-
-
-
-                full_text = ""
-                for line in response.text.split('\n'):
-                    if line.startswith("data: ") and line[6:] != "[DONE]":
-                        try:
-                            json_data = json.loads(line[6:])
-                            if 'choices' in json_data:
-                                choice = json_data['choices'][0]
-                                if 'delta' in choice and 'content' in choice['delta']:
-                                    full_text += choice['delta']['content']
-                        except json.JSONDecodeError:
-                            continue
-
-                self.last_response = {"text": full_text}
-                self.conversation.update_chat_history(prompt, full_text)
-                return {"text": full_text}
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
             except Exception as e:
-
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -233,20 +233,31 @@ class Hunyuan(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
-        def
-
-
-
-
-            self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
-
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data) # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
```
webscout/Provider/Jadve.py
CHANGED
```diff
@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import re
 from typing import Union, Any, Dict, Optional, Generator
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -27,7 +28,7 @@ class JadveOpenAI(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "gpt-4o-mini",
-        system_prompt: str = "You are a helpful AI assistant."
+        system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not used by this API
     ):
         """
         Initializes the JadveOpenAI client.
@@ -48,7 +49,8 @@ class JadveOpenAI(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://openai.jadve.com/stream"
@@ -61,24 +63,21 @@ class JadveOpenAI(Provider):
         # Headers for API requests
         self.headers = {
             "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://jadve.com",
-            "priority": "u=1, i",
+            "priority": "u=1, i", # Keep priority header if needed
             "referer": "https://jadve.com/",
-            "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "
-            "x-authorization": "Bearer"
+            "x-authorization": "Bearer" # Keep custom headers
         }
+
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -98,10 +97,21 @@ class JadveOpenAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _jadve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Jadve stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -121,83 +131,90 @@ class JadveOpenAI(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
-                raise Exception(
-                    f"Optimizer is not one of {list(self.__available_optimizers)}"
-                )
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
             ],
             "model": self.model,
             "botId": "",
             "chatId": "",
-            "stream": stream,
+            "stream": True, # API endpoint suggests streaming is default/required
             "temperature": 0.7,
             "returnTokensUsage": True,
             "useTools": False
         }
 
         def for_stream():
-
-
-
+            full_response_text = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
 
-
-
-
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is text after extraction
+                    content_extractor=self._jadve_extractor, # Use the specific extractor
+                    # end_marker="e:", # Add if 'e:' reliably marks the end
+                    yield_raw_on_error=True
                 )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    full_response_text += chunk
-                    # Return the current chunk
-                    yield chunk if raw else dict(text=chunk)
-
-                    # Remove matched parts from the buffer
-                    matched_parts = [f'0:"{match}"' for match in matches]
-                    for part in matched_parts:
-                        buffer = buffer.replace(part, '', 1)
-
-                    # Check if we've reached the end of the response
-                    if 'e:' in line or 'd:' in line:
-                        # No need to process usage data without logging
-                        break
-
-            self.last_response.update(dict(text=full_response_text))
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _jadve_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response_text += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
+
+                # Update history after stream finishes
+                self.last_response = {"text": full_response_text}
+                self.conversation.update_chat_history(prompt, full_response_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+
 
         def for_non_stream():
-            #
+            # Aggregate the stream using the updated for_stream logic
             collected_text = ""
-
-
-
-
-
-
-
-
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return collected_text if raw else self.last_response
+
 
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -208,7 +225,7 @@ class JadveOpenAI(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
-        Generate a chat response (string).
+        Generate a chat response (string).
 
         Args:
             prompt (str): Prompt to be sent.
@@ -218,18 +235,24 @@ class JadveOpenAI(Provider):
         Returns:
             str or generator: Generated response string or generator yielding response chunks.
         """
-        def
-
-
-
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def
-
-
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """
@@ -241,9 +264,11 @@ class JadveOpenAI(Provider):
             str: Extracted text.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        # Extractor handles formatting
+        return response.get("text", "")
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
```
|