webscout 8.2.4__py3-none-any.whl → 8.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIutel.py +240 -344
- webscout/Extra/autocoder/autocoder.py +66 -5
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +3 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +285 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +3 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +24 -12
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/WiseCat.py +1 -1
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/__init__.py +11 -13
- webscout/Provider/ai4chat.py +5 -3
- webscout/Provider/akashgpt.py +59 -66
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/freeaichat.py +57 -43
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +50 -30
- webscout/Provider/scnet.py +27 -21
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +305 -448
- webscout/exceptions.py +3 -0
- webscout/swiftcli/__init__.py +80 -794
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
- webscout-8.2.6.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/LLM.py +0 -442
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/PizzaGPT.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/tutorai.py +0 -270
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/freeaichat.py
CHANGED
@@ -1,6 +1,7 @@
+import re
 import requests
 import json
-import …
+import uuid
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
@@ -27,6 +28,12 @@ class FreeAIChat(Provider):
         "O3 Mini",
         "O3 Mini High",
         "O3 Mini Low",
+        "O4 Mini",
+        "O4 Mini High",
+        "GPT 4.1",
+        "o3",
+        "GPT 4.1 Mini",
+
 
         # Anthropic Models
         "Claude 3.5 haiku",
@@ -74,8 +81,9 @@ class FreeAIChat(Provider):
 
     def __init__(
         self,
+        api_key: str,
         is_conversation: bool = True,
-        max_tokens: int = …,
+        max_tokens: int = 150,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -83,8 +91,9 @@ class FreeAIChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "GPT …",
+        model: str = "GPT 4o",
         system_prompt: str = "You are a helpful AI assistant.",
+        temperature: float = 0.7,
     ):
         """Initializes the FreeAIChat API client."""
         if model not in self.AVAILABLE_MODELS:
@@ -105,11 +114,13 @@ class FreeAIChat(Provider):
         self.session.proxies.update(proxies)
 
         self.is_conversation = is_conversation
-        self.…
+        self.max_tokens = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.api_key = api_key
 
         self.__available_optimizers = (
             method
@@ -125,10 +136,21 @@ class FreeAIChat(Provider):
         )
 
         self.conversation = Conversation(
-            is_conversation, self.…
+            is_conversation, self.max_tokens, filepath, update_file
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the x0gpt stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"', chunk)
+            if match:
+                # Decode potential unicode escapes like \u00e9
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -146,24 +168,19 @@ class FreeAIChat(Provider):
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-            … (content of 3 removed lines not captured)
+        payload = {
+            "id": str(uuid.uuid4()),
+            "messages": [{
                 "role": "user",
                 "content": conversation_prompt,
-                "…
-
-                "name": self.model,
-                # "icon": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620805164087-5ec0135ded25d76864d553f1.png",
-                # "provider": "openAI",
-                # "contextWindow": 63920
-            }
-        }
-    ]
-
-        payload = {
+                "parts": [{"type": "text", "text": conversation_prompt}]
+            }],
             "model": self.model,
-            "…
+            "config": {
+                "temperature": self.temperature,
+                "maxTokens": self.max_tokens
+            },
+            "apiKey": self.api_key
         }
 
         def for_stream():
@@ -174,28 +191,25 @@ class FreeAIChat(Provider):
                         f"Request failed with status code {response.status_code}"
                     )
 
-                … (content of 20 removed lines not captured)
-                self.conversation.update_chat_history(prompt, streaming_text)
-
+                streaming_response = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_lines(decode_unicode=True),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._extractor,
+                    skip_markers=None
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_response += content_chunk
+                        yield dict(text=content_chunk) if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+
             except requests.RequestException as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
@@ -268,4 +282,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
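The rewritten streaming path above funnels response.iter_lines() through sanitize_stream with the new _extractor static method, which pulls text out of chunks shaped like 0:"...". A minimal standalone sketch of that extraction step, using hypothetical sample chunks rather than a live FreeAIChat response:

# Standalone sketch of the extractor logic added in the diff above.
# The sample chunk strings below are hypothetical.
import re
from typing import Optional

def extract_x0gpt_chunk(chunk: str) -> Optional[str]:
    """Mirror of FreeAIChat._extractor: pull the text out of a 0:"..." chunk."""
    match = re.search(r'0:"(.*?)"', chunk)
    if match:
        # Decode unicode escapes such as \u00e9, then unescape \\ and \"
        content = match.group(1).encode().decode('unicode_escape')
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

print(extract_x0gpt_chunk('0:"Hello, \\u00e9l\\u00e8ve!"'))  # -> Hello, élève!
print(extract_x0gpt_chunk('e:{"finishReason":"stop"}'))      # -> None (no 0:"..." payload)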
webscout/Provider/granite.py
CHANGED
@@ -1,9 +1,9 @@
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
-from typing import Union, Any, Dict, Generator
+from typing import Optional, Union, Any, Dict, Generator
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -77,6 +77,13 @@ class IBMGranite(Provider):
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
+        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
+            return chunk[1]
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -127,25 +134,21 @@ class IBMGranite(Provider):
                 )
                 response.raise_for_status() # Check for HTTP errors
 
-                # …
-                … (content of 14 removed lines not captured)
-                            # Skip unrecognized lines/formats
-                            pass
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        continue # Ignore lines that are not valid JSON or cannot be decoded
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines (which are lists)
+                    content_extractor=self._granite_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _granite_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
 
                 # Update history after stream finishes
                 self.last_response = dict(text=streaming_text)
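The Granite stream replaces the hand-rolled JSON parsing with sanitize_stream plus _granite_extractor, which keeps only two-element JSON lists of the form [3, "text"]. A small self-contained sketch of that filter, with hypothetical sample lines:

# Standalone sketch of the Granite chunk filter shown above; sample lines are hypothetical.
import json
from typing import Optional

def extract_granite_chunk(line: str) -> Optional[str]:
    """Mirror of IBMGranite._granite_extractor applied to one raw JSON line."""
    try:
        chunk = json.loads(line)
    except json.JSONDecodeError:
        return None
    if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
        return chunk[1]
    return None

for raw in ['[3, "Hello"]', '[3, " world"]', '[0, {"meta": true}]', 'not json']:
    print(repr(extract_granite_chunk(raw)))  # 'Hello', ' world', None, None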
webscout/Provider/hermes.py
CHANGED
@@ -4,7 +4,7 @@ import json
 from typing import Union, Any, Dict, Generator, Optional
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -102,6 +102,13 @@ class NousHermes(Provider):
             print(f"Warning: Error loading cookies: {e}")
             return None
 
+    @staticmethod
+    def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Hermes stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+            return chunk.get('content')
+        return None
+
 
     def ask(
         self,
@@ -145,36 +152,36 @@ class NousHermes(Provider):
             "top_p": self.top_p,
         }
         def for_stream():
-            …
+            streaming_text = "" # Initialize outside try block
             try:
                 response = self.session.post(
                     self.api_endpoint,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110"
+                    impersonate="chrome110" # Keep impersonate
                 )
                 response.raise_for_status()
 
-                … (content of 15 removed lines not captured)
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._hermes_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _hermes_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
 
-                self.last_response = dict(text=…
+                self.last_response = dict(text=streaming_text) # Use streaming_text
                 self.conversation.update_chat_history(
-                    prompt, …
+                    prompt, streaming_text # Use streaming_text
                 )
 
             except CurlError as e:
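Here the provider now feeds "data:"-prefixed SSE lines through sanitize_stream and keeps only objects whose type is "llm_response". A standalone sketch of that per-line handling, with hypothetical sample lines:

# Standalone sketch of the Hermes stream handling shown above; sample lines are hypothetical.
import json
from typing import Optional

def extract_hermes_line(line: str) -> Optional[str]:
    """Strip the 'data:' prefix, parse JSON, and mirror NousHermes._hermes_extractor."""
    if not line.startswith("data:"):
        return None
    try:
        chunk = json.loads(line[len("data:"):].strip())
    except json.JSONDecodeError:
        return None
    if isinstance(chunk, dict) and chunk.get("type") == "llm_response":
        return chunk.get("content")
    return None

lines = [
    'data: {"type": "llm_response", "content": "Hel"}',
    'data: {"type": "llm_response", "content": "lo"}',
    'data: {"type": "usage", "tokens": 12}',
]
print("".join(filter(None, (extract_hermes_line(l) for l in lines))))  # -> Hello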
webscout/Provider/learnfastai.py
CHANGED
@@ -1,12 +1,12 @@
 import os
 import json
-from typing import Optional, Union, Generator
+from typing import Any, Dict, Optional, Union, Generator
 import uuid
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -79,6 +79,13 @@ class LearnFast(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _learnfast_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts message content from LearnFast stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
+            return chunk['data'].get('message')
+        return None
+
     def generate_unique_id(self) -> str:
         """Generate a 32-character hexadecimal unique ID."""
         return uuid.uuid4().hex
@@ -209,24 +216,22 @@ class LearnFast(Provider):
                 )
                 response.raise_for_status() # Check for HTTP errors
 
-                # …
-                … (content of 15 removed lines not captured)
-                    except (json.JSONDecodeError, UnicodeDecodeError):
-                        pass # Ignore lines that are not valid JSON or cannot be decoded
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._learnfast_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _learnfast_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
 
                 # Update history after stream finishes
                 self.last_response = {"text": full_response}
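The LearnFast change follows the same pattern but also passes skip_markers=["[DONE]"] and digs the text out of data.message. A self-contained sketch of that behaviour on hypothetical sample lines:

# Standalone sketch of the LearnFast line handling shown above; sample lines are hypothetical.
import json
from typing import Optional

def extract_learnfast_line(line: str) -> Optional[str]:
    """Mirror of LearnFast._learnfast_extractor, with the [DONE] marker skipped."""
    if line.strip() == "[DONE]":
        return None
    try:
        chunk = json.loads(line)
    except json.JSONDecodeError:
        return None
    if isinstance(chunk, dict) and chunk.get("code") == 200 and chunk.get("data"):
        return chunk["data"].get("message")
    return None

stream = ['{"code": 200, "data": {"message": "Hi "}}',
          '{"code": 200, "data": {"message": "there"}}',
          '[DONE]']
print("".join(m for m in map(extract_learnfast_line, stream) if m))  # -> Hi there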
webscout/Provider/llmchatco.py
CHANGED
@@ -5,7 +5,7 @@ import uuid
 import re
 from typing import Union, Any, Dict, Optional, Generator, List
 
-from webscout.AIutel import Optimizers
+from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -66,15 +66,15 @@ class LLMChatCo(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
-
+
         # Create LitAgent instance (keep if needed for other headers)
         lit_agent = Lit()
-
+
         # Headers based on the provided request
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "text/event-stream",
-            "User-Agent": lit_agent.random(),
+            "User-Agent": lit_agent.random(),
             "Accept-Language": "en-US,en;q=0.9",
             "Origin": "https://llmchat.co",
             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
@@ -109,24 +109,16 @@ class LLMChatCo(Provider):
         # Store message history for conversation context
         self.last_assistant_response = ""
 
-        … (content of 10 removed lines not captured)
-            if data.startswith('data:'):
-                data_content = data[5:].strip()
-                if data_content:
-                    try:
-                        return {'data': json.loads(data_content)}
-                    except json.JSONDecodeError:
-                        return {'data': data_content}
-
+    @staticmethod
+    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from LLMChat.co stream JSON objects."""
+        if isinstance(chunk, dict) and "answer" in chunk:
+            answer = chunk["answer"]
+            # Prefer fullText if available and status is COMPLETED
+            if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                return answer["fullText"]
+            elif "text" in answer:
+                return answer["text"]
         return None
 
     def ask(
@@ -176,62 +168,40 @@ class LLMChatCo(Provider):
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
-                    json=payload,
+                    self.api_endpoint,
+                    json=payload,
                     # headers are set on the session
-                    stream=True,
+                    stream=True,
                     timeout=self.timeout,
                     # proxies are set on the session
                     impersonate="chrome110" # Use a common impersonation profile
                 )
                 response.raise_for_status() # Check for HTTP errors
-
-                # …
-                … (content of 16 removed lines not captured)
-                        if …
-                        … (content of 6 removed lines not captured)
-                            if data_content and current_event == 'answer':
-                                try:
-                                    json_data = json.loads(data_content)
-                                    if "answer" in json_data and "text" in json_data["answer"]:
-                                        text_chunk = json_data["answer"]["text"]
-                                        # If there's a fullText, use it as it's more complete
-                                        if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                                            text_chunk = json_data["answer"]["fullText"]
-
-                                        # Extract only new content since last chunk
-                                        new_text = text_chunk[len(full_response):]
-                                        if new_text:
-                                            full_response = text_chunk # Update full response tracker
-                                            resp = dict(text=new_text)
-                                            # Yield dict or raw string chunk
-                                            yield resp if not raw else new_text
-                                except json.JSONDecodeError:
-                                    continue # Ignore invalid JSON data
-                            elif data_content and current_event == 'done':
-                                # Handle potential final data before done event if needed
-                                break # Exit loop on 'done' event
-
+
+                # Use sanitize_stream
+                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._llmchatco_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                last_yielded_text = ""
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _llmchatco_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        # Calculate the new part of the text
+                        new_text = current_full_text[len(last_yielded_text):]
+                        if new_text:
+                            full_response = current_full_text # Keep track of the latest full text
+                            last_yielded_text = current_full_text # Update tracker
+                            resp = dict(text=new_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else new_text
+
                 # Update history after stream finishes
                 self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
@@ -244,7 +214,7 @@ class LLMChatCo(Provider):
             except Exception as e: # Catch other potential exceptions (like HTTPError)
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
-
+
         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
             full_response_text = ""
@@ -261,7 +231,7 @@ class LLMChatCo(Provider):
             # If aggregation fails but some text was received, use it. Otherwise, re-raise.
             if not full_response_text:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+
             # last_response and history are updated within for_stream
             # Return the final aggregated response dict or raw string
             return full_response_text if raw else self.last_response
@@ -313,17 +283,17 @@ if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-
+
     # Test all available models
     working = 0
     total = len(LLMChatCo.AVAILABLE_MODELS)
-
+
     for model in LLMChatCo.AVAILABLE_MODELS:
         try:
             test_ai = LLMChatCo(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word")
             response_text = response
-
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
                 # Truncate response if too long
@@ -333,4 +303,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+            print(f"{model:<50} {'✗':<10} {str(e)}")
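Unlike the other providers, LLMChat.co repeats the full answer text in every chunk, so the new loop yields only the suffix beyond what was already emitted. A minimal sketch of that cumulative-to-delta logic on hypothetical snapshots:

# Standalone sketch of the delta logic used above; the cumulative snapshots are hypothetical.
from typing import Iterable, Iterator

def yield_deltas(snapshots: Iterable[str]) -> Iterator[str]:
    """Turn cumulative text snapshots into incremental chunks."""
    last_yielded = ""
    for full_text in snapshots:
        new_text = full_text[len(last_yielded):]
        if new_text:
            last_yielded = full_text
            yield new_text

print(list(yield_deltas(["Hel", "Hello", "Hello, wor", "Hello, world!"])))
# ['Hel', 'lo', ', wor', 'ld!']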
webscout/Provider/multichat.py
CHANGED
@@ -4,7 +4,7 @@ import json
 import uuid
 from typing import Any, Dict, Union
 from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -279,8 +279,18 @@ class MultiChatAI(Provider):
         response = self._make_request(payload)
         try:
             # Use response.text which is already decoded
-            … (content of 2 removed lines not captured)
+            response_text_raw = response.text # Get raw text
+
+            # Process the text using sanitize_stream (even though it's not streaming)
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                intro_value=None, # No prefix
+                to_json=False # It's plain text
+            )
+            # Aggregate the single result
+            full_response = "".join(list(processed_stream)).strip()
+
+            self.last_response = {"text": full_response} # Store processed text
             self.conversation.update_chat_history(prompt, full_response)
             # Return dict or raw string based on raw flag
             return full_response if raw else self.last_response