webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
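The 8.2.5 wheel also drops the `inferno` and `webscout/Local` trees and replaces the console entry points, so it is worth confirming which build is actually installed before reading the per-file diffs below against a local environment. A minimal, standard-library-only sketch (it assumes a distribution named `webscout` is installed; nothing in it comes from the package itself):

```python
# Report the installed webscout version and check whether the installed build
# still ships the modules that 8.2.5 removes (inferno/ and webscout/Local/).
from importlib.metadata import PackageNotFoundError, files, version

try:
    print("installed version:", version("webscout"))
    removed_prefixes = ("inferno/", "webscout/Local/")
    still_shipped = [str(f) for f in (files("webscout") or [])
                     if str(f).startswith(removed_prefixes)]
    print("files from the removed modules in this install:", len(still_shipped))
except PackageNotFoundError:
    print("webscout is not installed in this environment")
```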
webscout/Provider/llmchat.py
CHANGED
@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Optional, Generator, List

@@ -44,7 +45,8 @@ class LLMChat(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://llmchat.in/inference/stream"
@@ -56,7 +58,6 @@ class LLMChat(Provider):
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
-            "User-Agent": Lit().random(),
             "Origin": "https://llmchat.in",
             "Referer": "https://llmchat.in/"
         }
@@ -79,7 +80,10 @@ class LLMChat(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Assign proxies directly

     def ask(
         self,
@@ -88,7 +92,7 @@ class LLMChat(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:  # Corrected return type hint
         """Chat with LLMChat with logging capabilities"""

         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
@@ -109,46 +113,79 @@ class LLMChat(Provider):
                 {"role": "user", "content": conversation_prompt}
             ],
             "max_tokens": self.max_tokens_to_sample,
-            "stream": stream
+            "stream": True  # API seems to always stream based on endpoint name
         }

         def for_stream():
+            full_response = ""  # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Use a common impersonation profile
+                )
+                response.raise_for_status()  # Check for HTTP errors

-
-
-
-
-
-                if line:
-                    line = line.decode('utf-8')
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8')
                             if line.startswith('data: '):
+                                data_str = line[6:]
+                                if data_str == '[DONE]':
+                                    break
                                 try:
-                                    data = json.loads(
+                                    data = json.loads(data_str)
                                     if data.get('response'):
                                         response_text = data['response']
                                         full_response += response_text
-
+                                        resp = dict(text=response_text)
+                                        # Yield dict or raw string chunk
+                                        yield resp if not raw else response_text
                                 except json.JSONDecodeError:
-
-
-
-
-
-
-
+                                    continue  # Ignore invalid JSON data
+                        except UnicodeDecodeError:
+                            continue  # Ignore decoding errors
+
+                # Update history after stream finishes
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )

-            except
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e

         def for_non_stream():
-
-
-
-
-
-
-
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -160,23 +197,27 @@ class LLMChat(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response with logging capabilities"""

-        def
-
-
-
-
-
-
-
-
-
-
-
-
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,  # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data)  # get_message expects dict

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response with validation"""
@@ -184,6 +225,7 @@ class LLMChat(Provider):
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
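The rewritten `llmchat.py` (and `llmchatco.py` below) swaps the previous HTTP client for `curl_cffi`'s `Session` with browser impersonation and consumes the endpoint as a stream of `data:`-prefixed JSON lines. A condensed sketch of that request/stream pattern, outside the provider class: the endpoint, payload fields, and the `chrome110` profile are taken from the hunks above, but the final request URL construction (the `url` variable in the diff) is not visible there, so this posts to the base endpoint and should be read as an illustration rather than a drop-in client.

```python
# Sketch of the curl_cffi streaming pattern used by the new LLMChat provider.
# Assumes the base endpoint accepts this payload as-is; the real class also
# threads in conversation history, optimizers, headers, and proxies.
import json
from curl_cffi import CurlError
from curl_cffi.requests import Session

API_ENDPOINT = "https://llmchat.in/inference/stream"

def stream_chat(prompt: str, timeout: int = 30):
    session = Session()
    payload = {
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 600,
        "stream": True,
    }
    try:
        response = session.post(
            API_ENDPOINT,
            json=payload,
            stream=True,
            timeout=timeout,
            impersonate="chrome110",  # present a real browser TLS/HTTP fingerprint
        )
        response.raise_for_status()
        for line_bytes in response.iter_lines():
            if not line_bytes:
                continue
            line = line_bytes.decode("utf-8", errors="ignore")
            if not line.startswith("data: "):
                continue
            data_str = line[6:]
            if data_str == "[DONE]":
                break
            try:
                chunk = json.loads(data_str)
            except json.JSONDecodeError:
                continue
            if chunk.get("response"):
                yield chunk["response"]  # incremental text chunk
    except CurlError as exc:
        raise RuntimeError(f"request failed: {exc}") from exc

# Usage: print("".join(stream_chat("Hello")))
```

The `impersonate` argument is the point of the migration: curl_cffi mimics a real browser's TLS and HTTP fingerprint, which a plain `requests` session cannot do.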
webscout/Provider/llmchatco.py
CHANGED
@@ -1,10 +1,11 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 import re
 from typing import Union, Any, Dict, Optional, Generator, List

-from webscout.AIutel import Optimizers
+from webscout.AIutel import Optimizers, sanitize_stream  # Import sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -37,7 +38,7 @@ class LLMChatCo(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048,  # Note: max_tokens is not used by this API
         timeout: int = 60,
         intro: str = None,
         filepath: str = None,
@@ -55,7 +56,8 @@ class LLMChatCo(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://llmchat.co/api/completion"
@@ -64,10 +66,10 @@ class LLMChatCo(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations
-
-        # Create LitAgent instance for
+
+        # Create LitAgent instance (keep if needed for other headers)
         lit_agent = Lit()
-
+
         # Headers based on the provided request
         self.headers = {
             "Content-Type": "application/json",
@@ -79,7 +81,8 @@ class LLMChatCo(Provider):
             "DNT": "1",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin"
+            "Sec-Fetch-Site": "same-origin",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }

         self.__available_optimizers = (
@@ -100,28 +103,22 @@ class LLMChatCo(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Assign proxies directly
         # Store message history for conversation context
         self.last_assistant_response = ""

-
-
-
-
-
-
-
-
-
-
-            if data.startswith('data:'):
-                data_content = data[5:].strip()
-                if data_content:
-                    try:
-                        return {'data': json.loads(data_content)}
-                    except json.JSONDecodeError:
-                        return {'data': data_content}
-
+    @staticmethod
+    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from LLMChat.co stream JSON objects."""
+        if isinstance(chunk, dict) and "answer" in chunk:
+            answer = chunk["answer"]
+            # Prefer fullText if available and status is COMPLETED
+            if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                return answer["fullText"]
+            elif "text" in answer:
+                return answer["text"]
         return None

     def ask(
@@ -167,85 +164,78 @@ class LLMChatCo(Provider):
         }

         def for_stream():
+            full_response = ""  # Initialize outside try block
             try:
-                #
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
-                    json=payload,
-                    headers
-                    stream=True,
-                    timeout=self.timeout
+                    self.api_endpoint,
+                    json=payload,
+                    # headers are set on the session
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110"  # Use a common impersonation profile
                 )
-                response.raise_for_status()
-
-                #
-
-
-
-
-
-
-
-
-
-
-
-                #
-
-
-
-
-
-
-
-
-
-
-
-                try:
-                    json_data = json.loads(data_content)
-                    if "answer" in json_data and "text" in json_data["answer"]:
-                        text_chunk = json_data["answer"]["text"]
-                        # If there's a fullText, use it as it's more complete
-                        if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                            text_chunk = json_data["answer"]["fullText"]
-
-                        # Extract only new content since last chunk
-                        new_text = text_chunk[len(full_response):]
-                        if new_text:
-                            full_response = text_chunk
-                            yield new_text if raw else dict(text=new_text)
-                except json.JSONDecodeError:
-                    continue
-                elif data_content and current_event == 'done':
-                    break
-
-                self.last_response.update(dict(text=full_response))
+                response.raise_for_status()  # Check for HTTP errors
+
+                # Use sanitize_stream
+                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    content_extractor=self._llmchatco_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                last_yielded_text = ""
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _llmchatco_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        # Calculate the new part of the text
+                        new_text = current_full_text[len(last_yielded_text):]
+                        if new_text:
+                            full_response = current_full_text  # Keep track of the latest full text
+                            last_yielded_text = current_full_text  # Update tracker
+                            resp = dict(text=new_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else new_text
+
+                # Update history after stream finishes
+                self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
                 self.conversation.update_chat_history(
-                    prompt,
+                    prompt, full_response
                 )

-            except
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-            except Exception as e:
-
-
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
+
         def for_non_stream():
-
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
             try:
-
-
-
-
-
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+
             except Exception as e:
-
-
-
-
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+

         return for_stream() if stream else for_non_stream()

@@ -259,25 +249,29 @@ class LLMChatCo(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response with streaming capabilities"""

-        def
-
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally,
                 web_search=web_search
-            )
-
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict

-        def
-
-
-
-
-
-
-
-
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,  # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+                web_search=web_search
             )
+            return self.get_message(response_data)  # get_message expects dict

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response with validation"""
@@ -285,20 +279,21 @@ class LLMChatCo(Provider):
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-
+
     # Test all available models
     working = 0
     total = len(LLMChatCo.AVAILABLE_MODELS)
-
+
     for model in LLMChatCo.AVAILABLE_MODELS:
         try:
             test_ai = LLMChatCo(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word")
             response_text = response
-
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
                 # Truncate response if too long
@@ -308,4 +303,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+            print(f"{model:<50} {'✗':<10} {str(e)}")
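The `llmchatco.py` rewrite delegates SSE parsing to webscout's `sanitize_stream` helper plus the `_llmchatco_extractor` shown above, but the core idea is the incremental-text bookkeeping: each event carries the cumulative answer so far, and only the new suffix is yielded. A self-contained sketch of that logic under fabricated sample events (`extract_answer` mirrors the extractor from the diff; the `events` list is invented test data, not real API output):

```python
# Delta extraction over a cumulative-text stream, as done in the new for_stream().
from typing import Any, Dict, Iterable, Iterator, Optional

def extract_answer(chunk: Dict[str, Any]) -> Optional[str]:
    """Mirror of _llmchatco_extractor: prefer fullText once status is COMPLETED."""
    answer = chunk.get("answer")
    if not isinstance(answer, dict):
        return None
    if answer.get("fullText") and answer.get("status") == "COMPLETED":
        return answer["fullText"]
    return answer.get("text")

def deltas(parsed_events: Iterable[Dict[str, Any]]) -> Iterator[str]:
    last = ""
    for event in parsed_events:
        full_text = extract_answer(event)
        if not full_text:
            continue
        new_text = full_text[len(last):]  # only the part not yielded yet
        if new_text:
            last = full_text
            yield new_text

events = [
    {"answer": {"text": "Hel", "status": "PENDING"}},
    {"answer": {"text": "Hello", "status": "PENDING"}},
    {"answer": {"text": "Hello", "fullText": "Hello!", "status": "COMPLETED"}},
]
print(list(deltas(events)))  # ['Hel', 'lo', '!']
```

Yielding only the suffix keeps the provider's streaming contract (small incremental chunks) even though the upstream API resends the whole answer on every event.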