webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
**webscout/Provider/OPENAI/heckai.py**

```diff
@@ -45,14 +45,16 @@ class Completions(BaseCompletions):
         question = format_prompt(messages, add_special_tokens=True)
 
         # Prepare the payload for HeckAI API
+        model = self._client.convert_model_name(model)
         payload = {
             "model": model,
             "question": question,
             "language": self._client.language,
             "sessionId": self._client.session_id,
-            "previousQuestion": None,
-            "previousAnswer": None,
-            "imgUrls": []
+            "previousQuestion": None,
+            "previousAnswer": None,
+            "imgUrls": [],
+            "superSmartMode": False
         }
 
         request_id = f"chatcmpl-{uuid.uuid4()}"
```
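The notable changes here are that the model name is now normalized through `convert_model_name` before the payload is built, and the request gains a `superSmartMode` flag. A minimal sketch of the resulting request body; the field names come from the diff, while the concrete values are placeholders:

```python
# Illustrative only: field names are taken from the diff, values are made up.
payload = {
    "model": "google/gemini-2.0-flash-001",   # after convert_model_name()
    "question": "User: Hello!\nAssistant:",   # flattened via format_prompt()
    "language": "English",                    # placeholder value
    "sessionId": "00000000-0000-0000-0000-000000000000",
    "previousQuestion": None,
    "previousAnswer": None,
    "imgUrls": [],
    "superSmartMode": False,                  # new field in 8.2.8
}
```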
```diff
@@ -76,69 +78,50 @@ class Completions(BaseCompletions):
             )
             response.raise_for_status()
 
-
-            completion_tokens = 0
-
-            streaming_text = ""
+            streaming_text = []
             in_answer = False
 
             for line in response.iter_lines(decode_unicode=True):
                 if not line:
                     continue
-
-                # Remove "data: " prefix
                 if line.startswith("data: "):
                     data = line[6:]
                 else:
                     continue
-
-                # Check for control markers
                 if data == "[ANSWER_START]":
                     in_answer = True
                     continue
-
                 if data == "[ANSWER_DONE]":
                     in_answer = False
                     continue
-
-                if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
+                if data.startswith("[") and data.endswith("]"):
                     continue
-
-                # Process content if we're in an answer section
                 if in_answer:
-
-
-
-
-
+                    # Fix encoding issues (e.g., emoji) for each chunk
+                    try:
+                        data_fixed = data.encode('latin1').decode('utf-8')
+                    except (UnicodeEncodeError, UnicodeDecodeError):
+                        data_fixed = data
+                    streaming_text.append(data_fixed)
+                    delta = ChoiceDelta(content=data_fixed)
                     choice = Choice(index=0, delta=delta, finish_reason=None)
-
                     chunk = ChatCompletionChunk(
                         id=request_id,
                         choices=[choice],
                         created=created_time,
                         model=model,
                     )
-
                     yield chunk
-
-            # Store the response for future context
-            # We don't need to store previous_question/answer as we're using format_prompt
-            # which handles the conversation formatting
-
             # Final chunk with finish_reason
             delta = ChoiceDelta(content=None)
             choice = Choice(index=0, delta=delta, finish_reason="stop")
-
             chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
                 created=created_time,
                 model=model,
             )
-
             yield chunk
-
         except requests.exceptions.RequestException as e:
             print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
             raise IOError(f"HeckAI request failed: {e}") from e
```
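The streaming parser no longer enumerates marker names like `[RELATE_Q_START]`: any bracketed `data:` payload is treated as a control marker and skipped, so markers the API adds later are handled automatically. A standalone sketch of the same state machine, with a fabricated event stream for illustration (the function name is not from the library):

```python
def parse_sse_answer(lines):
    """Yield answer chunks from a HeckAI-style SSE stream.

    [ANSWER_START] / [ANSWER_DONE] toggle the in_answer flag; any other
    bracketed token is skipped. This mirrors the logic in the diff above.
    """
    in_answer = False
    for line in lines:
        if not line.startswith("data: "):
            continue
        data = line[6:]
        if data == "[ANSWER_START]":
            in_answer = True
            continue
        if data == "[ANSWER_DONE]":
            in_answer = False
            continue
        if data.startswith("[") and data.endswith("]"):
            continue  # unknown control marker, e.g. [RELATE_Q_START]
        if in_answer:
            yield data

# Fabricated sample stream:
stream = [
    "data: [ANSWER_START]",
    "data: Hello",
    "data: world",
    "data: [ANSWER_DONE]",
    "data: [RELATE_Q_START]",
    "data: ignored",
]
assert list(parse_sse_answer(stream)) == ["Hello", "world"]
```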
```diff
@@ -147,10 +130,8 @@ class Completions(BaseCompletions):
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> ChatCompletion:
         try:
-
-            streaming_text = ""
+            answer_lines = []
             in_answer = False
-
             response = self._client.session.post(
                 self._client.url,
                 headers=self._client.headers,
```
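Both code paths also switch from accumulating into a string to appending chunks to a list and joining once, the usual Python idiom for building text incrementally without copying the growing string on every iteration. The idiom in isolation:

```python
# Append chunks to a list and join once at the end; repeated "s += chunk"
# re-copies the accumulated string on each iteration, a single join is linear.
parts = []
for chunk in ("Hel", "lo ", "wor", "ld"):
    parts.append(chunk)
text = "".join(parts)
assert text == "Hello world"
```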
```diff
@@ -159,64 +140,45 @@ class Completions(BaseCompletions):
                 timeout=self._client.timeout
             )
             response.raise_for_status()
-
             for line in response.iter_lines(decode_unicode=True):
                 if not line:
                     continue
-
-                # Remove "data: " prefix
                 if line.startswith("data: "):
                     data = line[6:]
                 else:
                     continue
-
-                # Check for control markers
                 if data == "[ANSWER_START]":
                     in_answer = True
                     continue
-
                 if data == "[ANSWER_DONE]":
                     in_answer = False
                     continue
-
-                if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
+                if data.startswith("[") and data.endswith("]"):
                     continue
-
-                # Process content if we're in an answer section
                 if in_answer:
-
-
-
-
-
-
-
-
-            # Create usage statistics (estimated)
+                    answer_lines.append(data)
+            full_text = " ".join(x.strip() for x in answer_lines if x.strip())
+            # Fix encoding issues (e.g., emoji)
+            try:
+                full_text = full_text.encode('latin1').decode('utf-8')
+            except (UnicodeEncodeError, UnicodeDecodeError):
+                pass
             prompt_tokens = len(payload["question"]) // 4
             completion_tokens = len(full_text) // 4
             total_tokens = prompt_tokens + completion_tokens
-
             usage = CompletionUsage(
                 prompt_tokens=prompt_tokens,
                 completion_tokens=completion_tokens,
                 total_tokens=total_tokens
             )
-
-            # Create the message object
             message = ChatCompletionMessage(
                 role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
+                content=full_text)
             choice = Choice(
                 index=0,
                 message=message,
                 finish_reason="stop"
             )
-
-            # Create the completion object
             completion = ChatCompletion(
                 id=request_id,
                 choices=[choice],
@@ -224,9 +186,7 @@ class Completions(BaseCompletions):
                 model=model,
                 usage=usage,
             )
-
             return completion
-
         except Exception as e:
             print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
             raise IOError(f"HeckAI request failed: {e}") from e
```
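Both the streaming and non-streaming paths repair mis-decoded UTF-8 with the `latin1` round trip: if the server's UTF-8 bytes were wrongly decoded as Latin-1 somewhere upstream, re-encoding as Latin-1 recovers the original bytes, which then decode cleanly as UTF-8. The usage figures, by contrast, are rough estimates at about four characters per token, not real tokenizer counts. A small demonstration of both:

```python
# Simulate mojibake: the UTF-8 bytes of an emoji mis-decoded as Latin-1.
garbled = "🔥".encode("utf-8").decode("latin1")   # 'ð\x9f\x94¥'
# Re-encoding as Latin-1 restores the original bytes; decoding those
# bytes as UTF-8 yields the intended text. This is the trick in the diff.
fixed = garbled.encode("latin1").decode("utf-8")
assert fixed == "🔥"

# The diff's usage stats use the same rough heuristic shown here:
prompt_tokens = len("What is mojibake?") // 4     # ~4 chars per token
```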
```diff
@@ -308,6 +268,12 @@ class HeckAI(OpenAICompatibleProvider):
             print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
             return "google/gemini-2.0-flash-001"
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
 
 # Simple test if run directly
 if __name__ == "__main__":
```
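The new `models` property, added here to `HeckAI` and repeated verbatim for `LLMChatCo`, `MCPCore`, and `MultiChatAI` in the hunks below, wraps the class-level `AVAILABLE_MODELS` list in a tiny adapter so callers can use the OpenAI-SDK-style call shape `provider.models.list()`. A self-contained sketch of the pattern with a stand-in provider class:

```python
class DemoProvider:
    """Stand-in for an OpenAI-compatible provider (illustrative only)."""
    AVAILABLE_MODELS = ["model-a", "model-b"]

    @property
    def models(self):
        # `self` is captured by closure, so the nested class can reach
        # the provider's AVAILABLE_MODELS through type(self).
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

assert DemoProvider().models.list() == ["model-a", "model-b"]
```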
**webscout/Provider/OPENAI/llmchatco.py**

```diff
@@ -325,3 +325,11 @@ class LLMChatCo(OpenAICompatibleProvider):
 
         # Initialize the chat interface
         self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
```
**webscout/Provider/OPENAI/mcpcore.py**

```diff
@@ -374,3 +374,10 @@ class MCPCore(OpenAICompatibleProvider):
             print(f"{RED}An unexpected error occurred loading cookies: {e}{RESET}")
             return None
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
```
**webscout/Provider/OPENAI/multichat.py**

```diff
@@ -276,6 +276,14 @@ class MultiChatAI(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+
     def _get_endpoint(self) -> str:
         """Get the API endpoint for the current provider."""
         return MODEL_CONFIGS[self.provider]["endpoint"]
```
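The trailing context in the `MultiChatAI` hunk shows that endpoints are resolved through a per-provider `MODEL_CONFIGS` table. A fabricated sketch of the shape that `_get_endpoint` implies; the real keys and endpoints live in `multichat.py`:

```python
# Fabricated entries; only the {"provider": {"endpoint": ...}} shape is
# implied by _get_endpoint() in the diff above.
MODEL_CONFIGS = {
    "example-provider": {"endpoint": "https://example.invalid/api/chat"},
}

def get_endpoint(provider: str) -> str:
    return MODEL_CONFIGS[provider]["endpoint"]

assert get_endpoint("example-provider") == "https://example.invalid/api/chat"
```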