webscout 8.3.4__py3-none-any.whl → 8.3.6__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries; wheels are plain zip archives, so the comparison can be reproduced locally (see the sketch after the file list).
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIutel.py +52 -1016
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +13 -1
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +12 -8
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +4 -4
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +12 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -10
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +11 -9
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +11 -10
- webscout/Provider/TogetherAI.py +12 -4
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +2 -96
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +6 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +1078 -0
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
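Since a .whl file is an ordinary zip archive, a comparison like this one can be reproduced locally by downloading both releases and diffing the extracted trees. A minimal sketch, assuming pip and a Unix diff are on PATH; the directory names are illustrative:

# Sketch: reproduce the 8.3.4 -> 8.3.6 comparison locally.
import glob
import subprocess
import zipfile

for version in ("8.3.4", "8.3.6"):
    # Fetch just the wheel, without dependencies.
    subprocess.run(
        ["pip", "download", f"webscout=={version}", "--no-deps", "--dest", f"dl-{version}"],
        check=True,
    )
    wheel = glob.glob(f"dl-{version}/webscout-{version}-*.whl")[0]
    with zipfile.ZipFile(wheel) as zf:
        zf.extractall(f"src-{version}")  # a .whl is a plain zip archive

# Compare the two extracted source trees.
subprocess.run(["diff", "-ru", "src-8.3.4", "src-8.3.6"])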
webscout/Provider/OPENAI/refact.py (new file)

@@ -0,0 +1,274 @@
+from regex import R
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.Deepinfra import DeepInfra
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+try:
+    from .generate_api_key import generate_full_api_key
+except ImportError:
+    # Fallback: define the function inline if import fails
+    import random
+    import string
+
+    def generate_api_key_suffix(length: int = 4) -> str:
+        """Generate a random API key suffix like 'C1Z5'"""
+        chars = string.ascii_uppercase + string.digits
+        return ''.join(random.choice(chars) for _ in range(length))
+
+    def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
+        """Generate a full API key with a random suffix"""
+        suffix = generate_api_key_suffix(4)
+        return prefix + suffix
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Refact'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        payload.update(kwargs)
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        json_str = line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
+                                "estimated_cost": None
+                            }
+                            yield chunk
+                        except json.JSONDecodeError:
+                            continue
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            print(f"Error during Refact stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message')
+                if not message_d and 'delta' in choice_d:
+                    delta = choice_d['delta']
+                    message_d = {
+                        'role': delta.get('role', 'assistant'),
+                        'content': delta.get('content', '')
+                    }
+                if not message_d:
+                    message_d = {'role': 'assistant', 'content': ''}
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            print(f"Error during Refact non-stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Refact'):
+        self.completions = Completions(client)
+
+class Refact(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o4-mini",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-5",
+        "gpt-5-mini",
+        "gpt-5-nano",
+        "claude-sonnet-4",
+        "claude-opus-4",
+        "claude-opus-4.1",
+        "gemini-2.5-pro",
+        "gemini-2.5-pro-preview"
+    ]
+
+    def __init__(self, api_key: str = None, browser: str = "chrome"):
+        # Mirror DeepInfra constructor signature but use the lightweight headers from lol.py
+        self.timeout = None
+        self.base_url = "https://inference.smallcloud.ai/v1/chat/completions"
+        self.session = requests.Session()
+
+        # Use minimal headers consistent with lol.py
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "refact-lsp 0.10.19",
+        }
+
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            self.headers["Authorization"] = f"Bearer {generate_full_api_key()}"
+
+        # Try to initialize LitAgent for compatibility, but do not alter headers (keep lol.py style)
+        try:
+            _ = LitAgent()
+        except Exception:
+            pass
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    client = Refact()
+    response = client.chat.completions.create(
+        model="claude-opus-4.1",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=10000,
+        stream=True
+    )
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end='', flush=True)
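The new file's __main__ block already demonstrates streaming; a non-streaming call through the same class would look roughly like the sketch below (an illustration only, assuming webscout 8.3.6 is installed; the model name is picked arbitrarily from AVAILABLE_MODELS):

# Sketch, not part of the package: non-streaming use of the new Refact provider.
from webscout.Provider.OPENAI.refact import Refact

client = Refact()            # with no api_key, a fallback key is generated
print(client.models.list())  # returns the static AVAILABLE_MODELS list

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Reply with one word."}],
    stream=False,
)
print(completion.choices[0].message.content)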
webscout/Provider/OPENAI/scirachat.py

@@ -347,6 +347,8 @@ class SciraChat(OpenAICompatibleProvider):
         "claude-4-opus-20250514": "scira-opus",
         "claude-4-opus-20250514-pro": "scira-opus-pro",
         "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
+        "kimi-k2-instruct": "scira-kimi-k2",
+        "scira-kimi-k2": "kimi-k2-instruct",
     }
     # Reverse mapping: Scira format to actual model names
     SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}

@@ -360,6 +362,8 @@ class SciraChat(OpenAICompatibleProvider):
     SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
     SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
     SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
+    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
     MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
     # Available models list (actual model names + scira aliases)
     AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
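The net effect of these two hunks is that the new Kimi model resolves under either spelling in both maps. A standalone sketch of just the new entries (the shipped maps carry many more models):

# Illustration only: the new kimi-k2 entries in isolation.
MODEL_MAPPING = {
    "kimi-k2-instruct": "scira-kimi-k2",
    "scira-kimi-k2": "kimi-k2-instruct",
}
SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}

# Either spelling resolves in either direction:
assert MODEL_MAPPING["kimi-k2-instruct"] == "scira-kimi-k2"
assert SCIRA_TO_MODEL["scira-kimi-k2"] == "kimi-k2-instruct"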
webscout/Provider/OPENAI/textpollinations.py

@@ -276,22 +276,23 @@ class TextPollinations(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
+        "deepseek-reasoning",
+        "glm",
+        "gpt-5-nano",
+        "llama-fast-roblox",
+        "llama-roblox",
+        "llamascout",
+        "mistral",
+        "mistral-nemo-roblox",
+        "mistral-roblox",
+        "nova-fast",
         "openai",
+        "openai-audio",
         "openai-fast",
         "openai-large",
-        "openai-reasoning",
         "openai-roblox",
-        "openai-audio",
-        "deepseek",
-        "deepseek-reasoning",
-        "grok",
-        "llamascout",
-        "mistral",
-        "phi",
         "qwen-coder",
-        "searchgpt",
         "bidara",
-        "elixposearch",
         "evil",
         "hypnosis-tracy",
         "midijourney",