webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release has been flagged as potentially problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/granite.py
CHANGED

@@ -1,8 +1,9 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
-from typing import Union, Any, Dict, Generator
+from typing import Optional, Union, Any, Dict, Generator
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -19,7 +20,7 @@ class IBMGranite(Provider):
         self,
         api_key: str,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -35,7 +36,8 @@ class IBMGranite(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://d18n68ssusgr7r.cloudfront.net/v1/chat/completions"
@@ -46,18 +48,19 @@ class IBMGranite(Provider):
         self.system_prompt = system_prompt
         self.thinking = thinking
 
-        # Use Lit agent
+        # Use Lit agent (keep if needed for other headers or logic)
         self.headers = {
-            "authority": "d18n68ssusgr7r.cloudfront.net",
-            "accept": "application/json,application/jsonl",
+            "authority": "d18n68ssusgr7r.cloudfront.net", # Keep authority
+            "accept": "application/json,application/jsonl", # Keep accept
             "content-type": "application/json",
-            "origin": "https://www.ibm.com",
-            "referer": "https://www.ibm.com/",
-            "user-agent": Lit().random(),
+            "origin": "https://www.ibm.com", # Keep origin
+            "referer": "https://www.ibm.com/", # Keep referer
         }
         self.headers["Authorization"] = f"Bearer {api_key}"
+
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -74,10 +77,17 @@ class IBMGranite(Provider):
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
+        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
+            return chunk[1]
+        return None
+
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -107,48 +117,77 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream,
-            "thinking": self.thinking,
+            "stream": True # API seems to require stream=True based on response format
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines (which are lists)
+                    content_extractor=self._granite_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
                 )
-                … (deleted lines not shown in the diff view)
-            except json.JSONDecodeError as e:
-                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _granite_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                # Update history after stream finishes
+                self.last_response = dict(text=streaming_text)
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except json.JSONDecodeError as e: # Keep specific JSON error handling
+                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                # Use specific exception type if available, otherwise generic
+                ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
+                raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-            # … (deleted lines not shown in the diff view)
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -159,16 +198,24 @@ class IBMGranite(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string using chat method"""
-        def … (deleted lines not shown in the diff view)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return …
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
@@ -176,6 +223,7 @@ class IBMGranite(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     # Example usage: Initialize without logging.
     ai = IBMGranite(
webscout/Provider/hermes.py
CHANGED

@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -38,7 +39,7 @@ class NousHermes(Provider):
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )
 
-        self.session = requests.Session()
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -49,15 +50,14 @@ class NousHermes(Provider):
         self.temperature = temperature
         self.top_p = top_p
         self.cookies_path = cookies_path
-        self.cookies = self._load_cookies()
+        self.cookies_dict = self._load_cookies()
+
         self.headers = {
             'accept': '*/*',
             'accept-language': 'en-US,en;q=0.9',
             'content-type': 'application/json',
             'origin': 'https://hermes.nousresearch.com',
             'referer': 'https://hermes.nousresearch.com/',
-            'user-agent': LitAgent().random(),
-            'cookie': self.cookies
         }
 
         self.__available_optimizers = (
@@ -77,20 +77,38 @@ class NousHermes(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
         self.session.proxies = proxies
+
+        # Apply cookies to curl_cffi session
+        if self.cookies_dict:
+            for name, value in self.cookies_dict.items():
+                self.session.cookies.set(name, value, domain="hermes.nousresearch.com")
 
-    def _load_cookies(self) -> Optional[str]:
-        """Load cookies from a JSON file and …
+    def _load_cookies(self) -> Optional[Dict[str, str]]:
+        """Load cookies from a JSON file and return them as a dictionary."""
         try:
             with open(self.cookies_path, 'r') as f:
                 cookies_data = json.load(f)
-                …
+            # Convert list of cookie objects to a dictionary
+            return {cookie['name']: cookie['value'] for cookie in cookies_data if 'name' in cookie and 'value' in cookie}
         except FileNotFoundError:
-            print("…
+            print(f"Warning: Cookies file not found at {self.cookies_path}")
             return None
         except json.JSONDecodeError:
-            print("…
+            print(f"Warning: Invalid JSON format in cookies file at {self.cookies_path}")
             return None
+        except Exception as e:
+            print(f"Warning: Error loading cookies: {e}")
+            return None
+
+    @staticmethod
+    def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Hermes stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+            return chunk.get('content')
+        return None
+
 
     def ask(
         self,
@@ -134,32 +152,59 @@ class NousHermes(Provider):
             "top_p": self.top_p,
         }
         def for_stream():
-            … (deleted lines not shown in the diff view)
+            streaming_text = "" # Initialize outside try block
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._hermes_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _hermes_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                self.last_response = dict(text=streaming_text) # Use streaming_text
+                self.conversation.update_chat_history(
+                    prompt, streaming_text # Use streaming_text
                 )
-            … (deleted lines not shown in the diff view)
-                        content = data['content']
-                        full_response += content
-                        yield content if raw else dict(text=content)
-                    except json.JSONDecodeError:
-                        continue
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-            … (deleted lines not shown in the diff view)
+            collected_text = ""
+            try:
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            return collected_text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
@@ -180,23 +225,25 @@ class NousHermes(Provider):
             str: Response generated
         """
 
-        def …
-            …
-                prompt, True, …
-            …
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
 
-        def …
-            … (deleted lines not shown in the diff view)
-            )
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data)
 
-        return …
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response