webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/HeckAI.py
CHANGED

@@ -2,7 +2,6 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 import uuid
-import sys
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
@@ -25,7 +24,6 @@ class HeckAI(Provider):
         "openai/gpt-4.1-mini",
         "x-ai/grok-3-mini-beta",
         "meta-llama/llama-4-scout"
-
     ]
 
     def __init__(
@@ -112,7 +110,8 @@ class HeckAI(Provider):
             "sessionId": self.session_id,
             "previousQuestion": self.previous_question,
             "previousAnswer": self.previous_answer,
-            "imgUrls": []
+            "imgUrls": [],
+            "superSmartMode": False  # Added based on API request data
         }
 
         # Store this message as previous for next request
@@ -120,7 +119,6 @@ class HeckAI(Provider):
 
         def for_stream():
             streaming_text = ""  # Initialize outside try block
-            # in_answer = False # No longer needed
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
@@ -134,15 +132,16 @@ class HeckAI(Provider):
                 response.raise_for_status()  # Check for HTTP errors
 
                 # Use sanitize_stream to process the stream
-                processed_stream = sanitize_stream(
-                    …
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=1024),  # Pass byte iterator
+                    intro_value="data: ",  # Prefix to remove (note the space)
+                    to_json=False,  # Content is text
+                    start_marker="data: [ANSWER_START]",
+                    end_marker="data: [ANSWER_DONE]",
+                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                    yield_raw_on_error=True,
+                    strip_chars=" \n\r\t"  # Strip whitespace characters from chunks
+                )
 
                 for content_chunk in processed_stream:
                     # content_chunk is the text between ANSWER_START and ANSWER_DONE
@@ -150,9 +149,21 @@ class HeckAI(Provider):
                     streaming_text += content_chunk
                     yield dict(text=content_chunk) if not raw else content_chunk
 
-                #
-                …
+                # Only update history if we received a valid response
+                if streaming_text:
+                    # Update history and previous answer after stream finishes
+                    self.previous_answer = streaming_text
+                    # Convert to simple text before updating conversation
+                    try:
+                        # Ensure content is valid before updating conversation
+                        if streaming_text and isinstance(streaming_text, str):
+                            # Sanitize the content to ensure it's valid
+                            sanitized_text = streaming_text.strip()
+                            if sanitized_text:  # Only update if we have non-empty content
+                                self.conversation.update_chat_history(prompt, sanitized_text)
+                    except Exception as e:
+                        # If conversation update fails, log but don't crash
+                        print(f"Warning: Failed to update conversation history: {str(e)}")
 
             except CurlError as e:  # Catch CurlError
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
@@ -177,7 +188,6 @@ class HeckAI(Provider):
                 if not full_text:
                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 
-            # last_response and history are updated within for_stream
             # Return the final aggregated response dict or raw string
             self.last_response = {"text": full_text}  # Update last_response here
             return full_text if raw else self.last_response
@@ -200,13 +210,13 @@ class HeckAI(Provider):
             return text
         return text
 
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:  # Corrected return type hint
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:  # Corrected return type hint
         def for_stream_chat():
             # ask() yields dicts or strings when streaming
             gen = self.ask(
@@ -226,32 +236,50 @@ class HeckAI(Provider):
 
         return for_stream_chat() if stream else for_non_stream_chat()
 
-    def get_message(self, response: dict) -> str:
-        …
+    def get_message(self, response: dict) -> str:
+        # Validate response format
+        if not isinstance(response, dict):
+            raise TypeError(f"Expected dict response, got {type(response).__name__}")
+
+        # Handle missing text key gracefully
+        if "text" not in response:
+            return ""
+
+        # Ensure text is a string
+        text = response["text"]
+        if not isinstance(text, str):
+            return str(text)
+
+        return text
 
-…
+if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            test_ai = HeckAI(model=model, timeout=60)
+            # Use non-streaming mode first to avoid potential streaming issues
+            try:
+                response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+                print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+            except Exception as e1:
+                # Fall back to streaming if non-streaming fails
+                print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+                response = test_ai.chat("Say 'Hello' in one word", stream=True)
+                response_text = ""
+                for chunk in response:
+                    if chunk and isinstance(chunk, str):
+                        response_text += chunk
+
+                if response_text and len(response_text.strip()) > 0:
+                    status = "✓"
+                    # Truncate response if too long
+                    display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                    print(f"\r{model:<50} {status:<10} {display_text}")
+                else:
+                    raise ValueError("Empty or invalid response")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
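The core of this patch is the rewritten `sanitize_stream` call, which now filters HeckAI's SSE stream by marker: only text between `[ANSWER_START]` and `[ANSWER_DONE]` is yielded, while related-question and reasoning markers are skipped. Below is a minimal sketch of that marker-based filtering in plain Python; `filter_answer_stream` is a hypothetical stand-in for webscout's `sanitize_stream`, and it simplifies by comparing markers after the `data: ` prefix has already been stripped.

```python
# Hypothetical stand-in for sanitize_stream's marker handling (illustrative only).
from typing import Iterator

def filter_answer_stream(lines: Iterator[str]) -> Iterator[str]:
    """Yield only the chunks that fall between the ANSWER markers."""
    skip = {"[RELATE_Q_START]", "[RELATE_Q_DONE]", "[REASON_START]", "[REASON_DONE]"}
    in_answer = False
    for raw_line in lines:
        line = raw_line.removeprefix("data: ").strip(" \n\r\t")  # intro_value + strip_chars
        if line == "[ANSWER_START]":
            in_answer = True
        elif line == "[ANSWER_DONE]":
            in_answer = False
        elif in_answer and line and line not in skip:
            yield line

# Simulated SSE payload in HeckAI's event format:
events = [
    "data: [REASON_START]", "data: thinking...", "data: [REASON_DONE]",
    "data: [ANSWER_START]", "data: Hello", "data: world", "data: [ANSWER_DONE]",
]
print(list(filter_answer_stream(events)))  # ['Hello', 'world']
```

The accumulated `streaming_text` is then used to update `previous_answer` and the conversation history only when non-empty, which avoids recording failed or empty streams.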
webscout/Provider/Koboldai.py
CHANGED

@@ -125,6 +125,7 @@ class KOBOLDAI(Provider):
             )
 
             message_load = ""
+            final_resp = None
             for value in response.iter_lines(
                 decode_unicode=True,
                 delimiter="" if raw else "event: message\ndata:",
@@ -135,12 +136,14 @@ class KOBOLDAI(Provider):
                     message_load += self.get_message(resp)
                     resp["token"] = message_load
                     self.last_response.update(resp)
-                    …
+                    final_resp = resp  # Always keep the latest
                 except json.decoder.JSONDecodeError:
                     pass
-            …
+            if final_resp:
+                yield final_resp if not raw else json.dumps(final_resp)
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
 
         def for_non_stream():
             # let's make use of stream
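The KOBOLDAI change buffers the stream instead of yielding eagerly: `final_resp` tracks the last successfully parsed event, and both the final yield and the chat-history update now run only if at least one event parsed. A small sketch of that buffer-the-latest pattern, using a hypothetical `last_valid_event` helper in place of the provider's loop body:

```python
# Illustrative sketch of the "keep the latest valid event" pattern (names hypothetical).
import json
from typing import Iterable, Optional

def last_valid_event(lines: Iterable[str]) -> Optional[dict]:
    """Return the last line that parses as JSON, or None if none did."""
    final_resp = None
    for value in lines:
        try:
            final_resp = json.loads(value)  # always keep the latest parseable event
        except json.JSONDecodeError:
            pass  # malformed lines are skipped, as in the patch
    return final_resp

print(last_valid_event(['{"token": "Hel"}', 'not json', '{"token": "Hello"}']))
# -> {'token': 'Hello'}
```

Guarding on `final_resp` means a stream that produced no parseable events yields nothing rather than an empty history update.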
webscout/Provider/LambdaChat.py
CHANGED

@@ -191,8 +191,8 @@ class LambdaChat(Provider):
             reasoning_text = ""
             if chunk["type"] == "stream" and "token" in chunk:
                 return chunk["token"].replace("\u0000", "")
-            elif chunk["type"] == "finalAnswer":
-                return chunk.get("text")
+            # elif chunk["type"] == "finalAnswer":
+            #     return chunk.get("text")
             elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
                 # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
                 return chunk["token"]  # Or potentially format as f"<think>{chunk['token']}</think>"
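With the `finalAnswer` branch commented out, the chunk handler returns text only for incremental `stream` and `reasoning` tokens; presumably the final answer duplicated text that had already been streamed. A hedged sketch of the resulting dispatch (the `extract_token` name is illustrative, not from the source):

```python
# Illustrative dispatch over LambdaChat event types after the patch.
from typing import Optional

def extract_token(chunk: dict) -> Optional[str]:
    """Return the incremental token carried by a chunk, or None."""
    if chunk.get("type") == "stream" and "token" in chunk:
        return chunk["token"].replace("\u0000", "")  # strip NUL characters
    if chunk.get("type") == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
        return chunk["token"]
    return None  # "finalAnswer" and other event types are now ignored

assert extract_token({"type": "stream", "token": "Hi"}) == "Hi"
assert extract_token({"type": "finalAnswer", "text": "Hi"}) is None
```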
webscout/Provider/Marcus.py
CHANGED

@@ -173,24 +173,16 @@ class Marcus(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the AskMarcus API."""
-        …
-        )
-        …
-        # ask() returns dict or str when not streaming
-        response_data = self.ask(
-            prompt, stream=False, raw=False, # Ensure ask returns dict
-            optimizer=optimizer, conversationally=conversationally
-        )
-        return self.get_message(response_data) # get_message expects dict
-
-        return for_stream_chat() if stream else for_non_stream_chat()
+        response_data = self.ask(
+            prompt, stream=False, raw=False,  # Always get the full response
+            optimizer=optimizer, conversationally=conversationally
+        )
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            return self.get_message(response_data)
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Extracts the message from the API response."""
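The rewritten `chat()` drops Marcus's separate streaming path entirely: it always fetches the complete response through `ask(stream=False)` and, when `stream=True`, wraps the finished text in a one-item generator so callers can still iterate. A self-contained sketch of that pseudo-streaming pattern, with a stand-in `chat()` that echoes its prompt instead of calling the AskMarcus API:

```python
# Pseudo-streaming sketch: the backend call is not incremental, but the
# stream=True interface is preserved. The echo body is a placeholder.
from typing import Generator, Union

def chat(prompt: str, stream: bool = False) -> Union[str, Generator[str, None, None]]:
    full_text = f"echo: {prompt}"  # stand-in for ask() + get_message()
    if stream:
        def stream_wrapper() -> Generator[str, None, None]:
            yield full_text  # a single chunk carrying the whole response
        return stream_wrapper()
    return full_text

for chunk in chat("hello", stream=True):
    print(chunk)  # prints the full response as one chunk
```

This keeps the `Union[str, Generator]` contract stable for callers even though no token-level streaming happens underneath.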