webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl
This diff compares publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between those versions as published.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/hermes.py
CHANGED
@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional

@@ -38,7 +39,7 @@ class NousHermes(Provider):
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )

-        self.session =
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -49,15 +50,14 @@ class NousHermes(Provider):
         self.temperature = temperature
         self.top_p = top_p
         self.cookies_path = cookies_path
-        self.
+        self.cookies_dict = self._load_cookies()
+
         self.headers = {
             'accept': '*/*',
             'accept-language': 'en-US,en;q=0.9',
             'content-type': 'application/json',
             'origin': 'https://hermes.nousresearch.com',
             'referer': 'https://hermes.nousresearch.com/',
-            'user-agent': LitAgent().random(),
-            'cookie': self.cookies
         }

         self.__available_optimizers = (
@@ -77,20 +77,31 @@ class NousHermes(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
         self.session.proxies = proxies
+
+        # Apply cookies to curl_cffi session
+        if self.cookies_dict:
+            for name, value in self.cookies_dict.items():
+                self.session.cookies.set(name, value, domain="hermes.nousresearch.com")

-    def _load_cookies(self) -> Optional[str]:
-        """Load cookies from a JSON file and
+    def _load_cookies(self) -> Optional[Dict[str, str]]:
+        """Load cookies from a JSON file and return them as a dictionary."""
         try:
             with open(self.cookies_path, 'r') as f:
                 cookies_data = json.load(f)
-
+            # Convert list of cookie objects to a dictionary
+            return {cookie['name']: cookie['value'] for cookie in cookies_data if 'name' in cookie and 'value' in cookie}
         except FileNotFoundError:
-            print("
+            print(f"Warning: Cookies file not found at {self.cookies_path}")
             return None
         except json.JSONDecodeError:
-            print("
+            print(f"Warning: Invalid JSON format in cookies file at {self.cookies_path}")
             return None
+        except Exception as e:
+            print(f"Warning: Error loading cookies: {e}")
+            return None
+

     def ask(
         self,
@@ -134,32 +145,59 @@ class NousHermes(Provider):
             "top_p": self.top_p,
         }
         def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
             full_response = ""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            decoded_line = line_bytes.decode('utf-8')
+                            if decoded_line.startswith('data: '):
+                                data_str = decoded_line.replace('data: ', '', 1)
+                                data = json.loads(data_str)
+                                if data.get('type') == 'llm_response':
+                                    content = data.get('content', '')
+                                    if content:
+                                        full_response += content
+                                        resp = dict(text=content)
+                                        yield resp if not raw else content
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue
+
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+

         def for_non_stream():
-
-
-
+            collected_text = ""
+            try:
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            return collected_text if raw else self.last_response
+

         return for_stream() if stream else for_non_stream()

@@ -180,23 +218,25 @@ class NousHermes(Provider):
             str: Response generated
         """

-        def
-
-            prompt, True,
-
-
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)

-        def
-
-
-
-
-
-
-            )
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data)

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
webscout/Provider/koala.py
CHANGED
webscout/Provider/learnfastai.py
CHANGED
@@ -2,8 +2,8 @@ import os
 import json
 from typing import Optional, Union, Generator
 import uuid
-import
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
@@ -20,7 +20,7 @@ class LearnFast(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -28,12 +28,13 @@ class LearnFast(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
+        system_prompt: str = "You are a helpful AI assistant.", # Note: system_prompt is not used by this API
     ):
         """
         Initializes the LearnFast.ai API with given parameters.
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = 'https://autosite.erweima.ai/api/v1/chat'
@@ -44,21 +45,17 @@ class LearnFast(Provider):
         self.headers = {
             "authority": "autosite.erweima.ai",
             "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "authorization": "", # Always empty
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://learnfast.ai",
-            "priority": "u=1, i",
+            "priority": "u=1, i", # Keep priority header if needed
             "referer": "https://learnfast.ai/",
-            "sec-ch-ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "cross-site",
-
+            # uniqueid will be added dynamically in ask()
         }

         self.__available_optimizers = (
@@ -66,7 +63,10 @@ class LearnFast(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -78,7 +78,6 @@ class LearnFast(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

     def generate_unique_id(self) -> str:
         """Generate a 32-character hexadecimal unique ID."""
@@ -98,14 +97,21 @@ class LearnFast(Provider):
         with open(image_path, "rb") as img_file:
             files = {"file": img_file}
             try:
-                response =
+                response = self.session.post(
+                    "https://0x0.st",
+                    files=files,
+                    # Add impersonate if using the main session
+                    impersonate="chrome110"
+                )
                 response.raise_for_status()
                 image_url = response.text.strip()
                 if not image_url.startswith("http"):
                     raise ValueError("Received an invalid URL from 0x0.st.")
                 return image_url
-            except
-                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e
+            except CurlError as e: # Catch CurlError
+                raise Exception(f"Failed to upload image to 0x0.st (CurlError): {e}") from e
+            except Exception as e: # Catch other potential errors
+                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e

     def create_payload(
         self,
@@ -135,7 +141,7 @@ class LearnFast(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -170,8 +176,9 @@ class LearnFast(Provider):
         unique_id = self.generate_unique_id()
         session_id = self.generate_session_id()

-        # Update headers with the unique ID
-        self.headers
+        # Update headers with the unique ID for this request
+        current_headers = self.headers.copy()
+        current_headers["uniqueid"] = unique_id

         # Upload image and get URL if image_path is provided
         image_url = None
@@ -187,35 +194,72 @@ class LearnFast(Provider):
         # Convert the payload to a JSON string
         data = json.dumps(payload)

-
-
-
-
+        def for_stream():
+            full_response = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=current_headers, # Use headers with uniqueid
+                    data=data,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                # Process the streamed response
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8').strip()
+                            if line == "[DONE]":
+                                break
+                            json_response = json.loads(line)
+                            if json_response.get('code') == 200 and json_response.get('data'):
+                                message = json_response['data'].get('message', '')
+                                if message:
+                                    full_response += message
+                                    resp = {"text": message}
+                                    # Yield dict or raw string chunk
+                                    yield resp if not raw else message
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            pass # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)

-
-
-
-
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"An error occurred (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+
+
+        return for_stream() if stream else for_non_stream()

     def chat(
         self,
@@ -237,14 +281,23 @@ class LearnFast(Provider):
             Union[str, Generator[str, None, None]]: Response generated
         """
         try:
-
+            # ask() yields dicts or strings when streaming
+            response_gen = self.ask(
+                prompt, stream=stream, raw=False, # Ensure ask yields dicts/dict
+                optimizer=optimizer, conversationally=conversationally,
+                image_path=image_path
+            )
             if stream:
-
-
+                def stream_wrapper():
+                    for chunk_dict in response_gen:
+                        yield self.get_message(chunk_dict) # get_message expects dict
+                return stream_wrapper()
             else:
-
+                # response_gen is the final dict in non-stream mode
+                return self.get_message(response_gen) # get_message expects dict
         except Exception as e:
-
+            # Return error message directly, consider raising instead for better error handling upstream
+            return f"Error: {str(e)}"

     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -259,6 +312,7 @@ class LearnFast(Provider):
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = LearnFast()
     response = ai.chat(input(">>> "), stream=True)