webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +367 -41
- webscout/Bard.py +2 -22
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/scira_search.py +24 -11
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/Deepinfra.py +75 -57
- webscout/Provider/ExaChat.py +93 -63
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +39 -59
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +31 -30
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/autoproxy.py +753 -18
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +96 -132
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +82 -49
- webscout/Provider/OPENAI/textpollinations.py +13 -12
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +4 -4
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/Qodo.py +454 -0
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +91 -82
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +6 -6
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +257 -104
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +43 -48
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +325 -299
- webscout/Provider/yep.py +79 -96
- webscout/__init__.py +7 -2
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +146 -105
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/scira_search.py
CHANGED

```diff
@@ -43,17 +43,30 @@ class Scira(AISearch):
     """

     AVAILABLE_MODELS = {
-        "scira-default": "
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
+        "scira-default": "grok-3-mini", # thinking model
+        "scira-x-fast-mini": "grok-3-mini-fast",
+        "scira-x-fast": "grok-3-fast",
+        "scira-nano": "gpt-4.1-nano",
+        "scira-grok-3": "grok-3",
+        "scira-grok-4": "grok-4",
+        "scira-vision": "grok-2-vision-1212",
+        "scira-g2": "grok-2-latest",
+        "scira-4o-mini": "gpt-4o-mini",
+        "scira-o4-mini": "o4-mini-2025-04-16",
+        "scira-o3": "o3",
+        "scira-qwen-32b": "qwen/qwen3-32b",
+        "scira-qwen-30b": "qwen3-30b-a3b",
+        "scira-deepseek-v3": "deepseek-v3-0324",
+        "scira-haiku": "claude-3-5-haiku-20241022",
+        "scira-mistral": "mistral-small-latest",
+        "scira-google-lite": "gemini-2.5-flash-lite-preview-06-17",
+        "scira-google": "gemini-2.5-flash",
+        "scira-google-pro": "gemini-2.5-pro",
+        "scira-anthropic": "claude-sonnet-4-20250514",
+        "scira-anthropic-thinking": "claude-sonnet-4-20250514",
+        "scira-opus": "claude-4-opus-20250514",
+        "scira-opus-pro": "claude-4-opus-20250514",
+        "scira-llama-4": "meta-llama/llama-4-maverick-17b-128e-instruct",
     }

     def __init__(
         self,
```
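A minimal usage sketch for the expanded alias table, assuming `Scira` accepts a `model` kwarg naming one of the `AVAILABLE_MODELS` keys and exposes the shared `AISearch.search()` streaming interface; neither signature is visible in this hunk:

```python
from webscout.Provider.AISEARCH.scira_search import Scira

# Hypothetical: the `model` kwarg is assumed from the class shape above;
# "scira-google" resolves to "gemini-2.5-flash" per the new mapping.
ai = Scira(model="scira-google")
for chunk in ai.search("What is quantum computing?", stream=True):
    print(chunk, end="", flush=True)
```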
webscout/Provider/AISEARCH/stellar_search.py
ADDED

```python
# @@ -0,0 +1,132 @@ — entire file added in 8.3.4
import requests
import re
from typing import Dict, Optional, Generator, Union, Any
from webscout.AIbase import AISearch, SearchResponse
from webscout import exceptions
from webscout.litagent import LitAgent
from webscout.AIutel import sanitize_stream

class Stellar(AISearch):
    """AI Search provider for stellar.chatastra.ai"""
    def __init__(self, timeout: int = 30, proxies: Optional[dict] = None):
        self.api_endpoint = "https://stellar.chatastra.ai/search/x1GUVzl"
        self.timeout = timeout
        self.proxies = proxies
        self.session = requests.Session()
        self.headers = {
            "accept": "text/x-component",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "multipart/form-data; boundary=----WebKitFormBoundaryQsWD5Qs3QqDkNBPH",
            "dnt": "1",
            "next-action": "efc2643ed9bafe182a010b58ebea17f068ad3985",
            "next-router-state-tree": "%5B%22%22%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2C%22%2F%22%2C%22refresh%22%5D%7D%2Cnull%2Cnull%2Ctrue%5D",
            "origin": "https://stellar.chatastra.ai",
            "priority": "u=1, i",
            "referer": "https://stellar.chatastra.ai/search/x1GUVzl",
            "sec-ch-ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
            "cookie": "__client_uat=0; __client_uat_K90aduOv=0",
        }
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies = proxies

    def _make_payload(self, prompt: str) -> bytes:  # This is a static payload for the demo; in production, generate dynamically as needed
        boundary = "----WebKitFormBoundaryQsWD5Qs3QqDkNBPH"
        parts = [
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n{{\"id\":\"71bb616ba5b7cbcac2308fe0c249a9f2d51825b7\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"2\"\r\n\r\n{{\"id\":\"8bcca1d0cb933b14fefde88dacb2865be3d1d525\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_input\"\r\n\r\n{prompt}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_id\"\r\n\r\nx1GUVzl\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_userId\"\r\n\r\nnull\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"0\"\r\n\r\n[{{\"action\":\"$F1\",\"options\":{{\"onSetAIState\":\"$F2\"}}}},{{\"messages\":[],\"chatId\":\"\"}},\"$K3\"]\r\n",
            f"--{boundary}--\r\n"
        ]
        return "".join(parts).encode("utf-8")

    @staticmethod
    def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the Stellar stream format with hex keys and diff arrays. Handles both str and bytes input."""
        if isinstance(chunk, bytes):
            try:
                chunk = chunk.decode('utf-8', errors='replace')
            except Exception:
                return None
        if not isinstance(chunk, str):
            return None
        # Match patterns like 6e:{"diff":[0," empathy"],"next":"$@6f"}
        pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"\\]*)"\]'
        matches = re.findall(pattern, chunk)
        if matches:
            extracted_text = ''.join(matches)
            # Fix escaped newlines
            extracted_text = extracted_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
            return extracted_text if extracted_text.strip() else None
        return None

    def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
        payload = self._make_payload(prompt)
        try:
            response = self.session.post(
                self.api_endpoint,
                data=payload,
                timeout=self.timeout,
                proxies=self.proxies,
                stream=stream,
            )
            if not response.ok:
                raise exceptions.APIConnectionError(f"Failed to get response: {response.status_code} {response.text}")

            def _yield_stream():
                # Use sanitize_stream for real-time extraction from the response iterator
                processed_stream = sanitize_stream(
                    data=response.iter_lines(decode_unicode=True),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                        if raw:
                            yield {"text": content}
                        else:
                            yield content
                # Do NOT yield SearchResponse(full_response) in streaming mode to avoid duplicate output

            if stream:
                return _yield_stream()
            else:
                # Use sanitize_stream for the full response text
                processed_stream = sanitize_stream(
                    data=response.text.splitlines(),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                if raw:
                    return {"text": full_response}
                else:
                    return SearchResponse(full_response)
        except requests.RequestException as e:
            raise exceptions.APIConnectionError(f"Request failed: {e}")

if __name__ == "__main__":
    from rich import print
    ai = Stellar()
    user_query = input(">>> ")
    response = ai.search(user_query, stream=True, raw=False)
    for chunk in response:
        print(chunk, end="", flush=True)
```
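The load-bearing piece of the new provider is the extractor's regex: the Next.js server action streams `hexid:{"diff":[0,"<text>"]}` records, and `_stellar_extractor` concatenates the captured fragments. A standalone check of that exact pattern (the sample chunk is invented for illustration):

```python
import re

# Pattern copied verbatim from Stellar._stellar_extractor above.
pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"\\]*)"\]'

# Sample fragment in the format the in-code comment describes.
chunk = '6e:{"diff":[0," empathy"],"next":"$@6f"}\n6f:{"diff":[0," matters"],"next":"$@70"}'
print("".join(re.findall(pattern, chunk)))  # -> " empathy matters"
```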
webscout/Provider/Deepinfra.py
CHANGED

```diff
@@ -17,62 +17,66 @@ class DeepInfra(Provider):
     """

     AVAILABLE_MODELS = [
-
-        "
-        "deepseek-ai/DeepSeek-R1",
-        "
-        "
-        "
-        "
+        "anthropic/claude-4-opus",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "
-        "
-        "
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-
-        "
-        "
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "
-        "
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]

     @staticmethod
@@ -84,6 +88,7 @@ class DeepInfra(Provider):

     def __init__(
         self,
+        api_key: Optional[str] = None,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,
@@ -107,21 +112,34 @@ class DeepInfra(Provider):
         self.agent = LitAgent()
         # Fingerprint generation might be less relevant with impersonate
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+        self.api = api_key
         # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "Origin": "https://deepinfra.com",
-            "Pragma": "no-cache",
-            "Referer": "https://deepinfra.com/",
-            "Sec-Fetch-Dest": "empty",
+            "Cache-Control": "no-cache",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
-            "X-Deepinfra-Source": "web-embed",
+            "X-Deepinfra-Source": "web-embed",
+            # Additional headers from LitAgent.generate_fingerprint
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+            "Forwarded": self.fingerprint.get("forwarded", ""),
+            "X-Forwarded-Proto": self.fingerprint.get("x-forwarded-proto", ""),
+            "X-Request-Id": self.fingerprint.get("x-request-id", ""),
         }
+        if self.api is not None:
+            self.headers["Authorization"] = f"Bearer {self.api}"

         # Initialize curl_cffi Session
         self.session = Session()
@@ -321,7 +339,7 @@ if __name__ == "__main__":

     for model in DeepInfra.AVAILABLE_MODELS:
         try:
-            test_ai = DeepInfra(model=model, timeout=60)
+            test_ai = DeepInfra(model=model, timeout=60, api_key="jwt:eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJnaDoxNTg5ODg0NzgiLCJleHAiOjE3NTI3NDI5NDV9.qM93p6bPZYi_ejaOo1Dbe4UjYXrFiM7XvBLN4-9BWag")
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
```
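Note that the new `__main__` test hard-codes a `jwt:`-prefixed bearer token. A minimal sketch of the added `api_key` path, grounded in this hunk (optional kwarg; when set, requests carry `Authorization: Bearer <api_key>`):

```python
from webscout.Provider.Deepinfra import DeepInfra

# api_key is the new optional kwarg in 8.3.4. With api_key=None the provider
# falls back to the web-embed headers, as in 8.3.2.
ai = DeepInfra(model="Qwen/Qwen3-32B", api_key=None)  # or api_key="<your DeepInfra token>"
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)
```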
webscout/Provider/ExaChat.py
CHANGED

```diff
@@ -2,7 +2,7 @@ from curl_cffi import CurlError
 from curl_cffi.requests import Session, Response # Import Response
 import json
 import uuid
-from typing import Any, Dict, Union, Optional, List
+from typing import Any, Dict, Union, Optional, List, Generator
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
@@ -21,9 +21,9 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash",
             "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
-            "gemini-2.5-
+            "gemini-2.5-flash-lite-preview-06-17",
             "gemini-2.0-pro-exp-02-05",
-            "gemini-2.5-flash
+            "gemini-2.5-flash",


         ],
@@ -62,7 +62,9 @@ MODEL_CONFIGS = {
         "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
-            "llama-3.3-70b"
+            "llama-3.3-70b",
+            "llama-4-scout-17b-16e-instruct",
+            "qwen-3-32b"
         ],
     },
     "xai": {
@@ -88,9 +90,9 @@ class ExaChat(Provider):
         "gemini-2.0-flash",
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash
+        "gemini-2.5-flash",
+        "gemini-2.5-flash-lite-preview-06-17",

         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -120,6 +122,8 @@ class ExaChat(Provider):
         # Cerebras Models
         "llama3.1-8b",
         "llama-3.3-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",

     ]

@@ -264,10 +268,11 @@ class ExaChat(Provider):
     def ask(
         self,
         prompt: str,
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a prompt to the API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -281,78 +286,103 @@ class ExaChat(Provider):

         payload = self._build_payload(conversation_prompt)
         response = self._make_request(payload)
-
-
+        processed_stream = sanitize_stream(
+            data=response.iter_content(chunk_size=None),
+            intro_value=None,
+            to_json=True,
+            content_extractor=self._exachat_extractor,
+            yield_raw_on_error=False,
+            raw=raw
+        )
+        if stream:
+            streaming_text = ""
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                if raw:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+            self.last_response = {"text": streaming_text}
+            self.conversation.update_chat_history(prompt, streaming_text)
+        else:
             full_response = ""
-        # Use sanitize_stream to process the response
-        processed_stream = sanitize_stream(
-            data=response.iter_content(chunk_size=None), # Pass byte iterator
-            intro_value=None, # API doesn't seem to use 'data:' prefix
-            to_json=True, # Stream sends JSON lines
-            content_extractor=self._exachat_extractor, # Use the specific extractor
-            yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-        )
-
             for content_chunk in processed_stream:
-            # content_chunk is the string extracted by _exachat_extractor
                 if content_chunk and isinstance(content_chunk, str):
-
-
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                if raw:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-        return self.last_response if not raw else full_response
-
-        except json.JSONDecodeError as e:
-            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
+            return self.last_response if not raw else full_response

     def chat(
         self,
         prompt: str,
+        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-
-
-
-
-
-
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+        def for_non_stream():
+            result = self.ask(
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+            )
+            if raw:
+                return result if isinstance(result, str) else str(result)
+            return self.get_message(result)
+        return for_stream() if stream else for_non_stream()

     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
         if isinstance(response, dict):
-
-
+            text = response.get("text", "")
+        else:
+            text = str(response)
+        return text.replace('\\\\', '\\').replace('\\"', '"')

 if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)

-    # Test all available models
-    working = 0
-    total = len(ExaChat.AVAILABLE_MODELS)
+    # # Test all available models
+    # working = 0
+    # total = len(ExaChat.AVAILABLE_MODELS)

-    for model in ExaChat.AVAILABLE_MODELS:
-
-
-
-
+    # for model in ExaChat.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = ExaChat(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response

-
-
-
-
-
-
-
-
-
-
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = ExaChat(model="gemini-2.0-flash")
+    response = ai.chat("tell me a joke", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
```
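Grounded in the new `__main__` block above: `chat()` is now a thin wrapper over the generator-based `ask()`, returning a stream of text chunks when `stream=True`. A minimal sketch using one of the newly added Cerebras models:

```python
from webscout.Provider.ExaChat import ExaChat

# "qwen-3-32b" is one of the two Cerebras models added in this release.
ai = ExaChat(model="qwen-3-32b")
for chunk in ai.chat("tell me a joke", stream=True):
    print(chunk, end="", flush=True)
```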
webscout/Provider/Flowith.py
CHANGED

```diff
@@ -15,7 +15,7 @@ class Flowith(Provider):
     """
     A provider class for interacting with the Flowith API.
     """
-    AVAILABLE_MODELS = ["gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
+    AVAILABLE_MODELS = ["gpt-4.1-nano", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]

     def __init__(
         self,
```
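Only the model list changed here. A minimal check, assuming the module path matches the file list above:

```python
from webscout.Provider.Flowith import Flowith

# 8.3.4 prepends "gpt-4.1-nano"; the rest of the list is unchanged.
assert "gpt-4.1-nano" in Flowith.AVAILABLE_MODELS
```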
webscout/Provider/FreeGemini.py
CHANGED

```diff
@@ -83,7 +83,7 @@ class FreeGemini(Provider):
         self.last_response = {}
         self.system_prompt = system_prompt # Stored for consistency

-        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.
+        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"

         self.agent = LitAgent()
         self.headers = {
@@ -246,5 +246,5 @@ class FreeGemini(Provider):
 if __name__ == "__main__":
     # Example usage
     free_gemini = FreeGemini()
-    response = free_gemini.chat("
+    response = free_gemini.chat("how many r in strawberry", stream=False)
     print(response) # Should print the response from the API
```
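The endpoint now pins gemini-2.5-flash and requests SSE output (`alt=sse`). A minimal sketch, assuming `FreeGemini.chat(..., stream=True)` yields text chunks like the other providers (the `__main__` above only shows `stream=False`):

```python
from webscout.Provider.FreeGemini import FreeGemini

ai = FreeGemini()
# stream=True is assumed to mirror the stream=False call shown above.
for chunk in ai.chat("how many r in strawberry", stream=True):
    print(chunk, end="", flush=True)
```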
webscout/Provider/Gemini.py
CHANGED

```diff
@@ -10,22 +10,15 @@ from ..Bard import Chatbot, Model

 warnings.simplefilter("ignore", category=UserWarning)

-# Define model aliases for easy usage
+# Define model aliases for easy usage (only supported models)
 MODEL_ALIASES: Dict[str, Model] = {
     "unspecified": Model.UNSPECIFIED,
-    "gemini-2.0-flash": Model.G_2_0_FLASH,
-    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
-    "gemini-2.5-pro": Model.G_2_5_PRO,
-    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
     "gemini-2.5-flash": Model.G_2_5_FLASH,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
     # Add shorter aliases for convenience
-    "flash": Model.G_2_0_FLASH,
     "flash-2.5": Model.G_2_5_FLASH,
-    "thinking": Model.G_2_0_FLASH_THINKING,
     "pro": Model.G_2_5_PRO,
-    "
-    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+    "unspecified": Model.UNSPECIFIED,
 }

 # List of available models (friendly names)
```