webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic; see the package's registry page for details.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Bard.py
CHANGED

@@ -81,21 +81,11 @@ class Model(Enum):
         model_header (dict): Additional headers required for the model.
         advanced_only (bool): Whether the model is available only for advanced users.
     """
-    #
+    # Only the specified models
     UNSPECIFIED = ("unspecified", {}, False)
-    G_2_0_FLASH = (
-        "gemini-2.0-flash",
-        {"x-goog-ext-525001261-jspb": '[1,null,null,null,"f299729663a2343f"]'},
-        False,
-    )
-    G_2_0_FLASH_THINKING = (
-        "gemini-2.0-flash-thinking",
-        {"x-goog-ext-525001261-jspb": '[null,null,null,null,"7ca48d02d802f20a"]'},
-        False,
-    )
     G_2_5_FLASH = (
         "gemini-2.5-flash",
-        {"x-goog-ext-525001261-jspb": '[1,null,null,null,"
+        {"x-goog-ext-525001261-jspb": '[1,null,null,null,"71c2d248d3b102ff"]'},
         False,
     )
     G_2_5_PRO = (

@@ -103,16 +93,6 @@ class Model(Enum):
         {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'},
         False,
     )
-    G_2_0_EXP_ADVANCED = (
-        "gemini-2.0-exp-advanced",
-        {"x-goog-ext-525001261-jspb": '[null,null,null,null,"b1e46a6037e6aa9f"]'},
-        True,
-    )
-    G_2_5_EXP_ADVANCED = (
-        "gemini-2.5-exp-advanced",
-        {"x-goog-ext-525001261-jspb": '[null,null,null,null,"203e6bb81620bcfe"]'},
-        True,
-    )

     def __init__(self, name, header, advanced_only):
         """
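Each enum member carries a (name, header, advanced_only) tuple; the per-model x-goog-ext-525001261-jspb header is what selects the model on the wire. A minimal sketch of how those headers would be merged into a request, assuming the model_header attribute name documented in the class docstring (the base_headers dict and the merge itself are illustrative, not webscout code):

from webscout.Bard import Model

model = Model.G_2_5_FLASH
# Attribute name taken from the class docstring; the merge is illustrative.
base_headers = {"Content-Type": "application/json"}
request_headers = {**base_headers, **model.model_header}
# request_headers now carries:
#   x-goog-ext-525001261-jspb: [1,null,null,null,"71c2d248d3b102ff"]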
webscout/Provider/AISEARCH/__init__.py
CHANGED

@@ -1,10 +1,11 @@
-from .stellar_search import *
-from .felo_search import *
-from .DeepFind import *
-from .genspark_search import *
-from .monica_search import *
-from .webpilotai_search import *
-from .hika_search import *
-from .scira_search import *
-from .iask_search import *
-from .Perplexity import *
+from .stellar_search import *
+from .felo_search import *
+from .DeepFind import *
+from .genspark_search import *
+from .monica_search import *
+from .webpilotai_search import *
+from .hika_search import *
+from .scira_search import *
+from .iask_search import *
+from .Perplexity import *
+# from .PERPLEXED_search import *
webscout/Provider/AISEARCH/felo_search.py
CHANGED

@@ -69,7 +69,7 @@ class Felo(AISearch):
         "accept-encoding": "gzip, deflate, br, zstd",
         "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
         "content-type": "application/json",
-        "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
+        # "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
         "dnt": "1",
         "origin": "https://felo.ai",
         "referer": "https://felo.ai/",

@@ -141,10 +141,14 @@ class Felo(AISearch):
         "lang": "",
         "agent_lang": "en",
         "search_options": {
-            "langcode": "en-US"
+            "langcode": "en-US",
+            "search_image": True,
+            "search_video": True,
         },
         "search_video": True,
-        "
+        "model": "",
+        "contexts_from": "google",
+        "auto_routing": True,
     }

     def for_stream():
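After this hunk the assembled request body looks roughly as follows; note that search_video now appears both inside search_options and at the top level of the payload. A sketch, with the fields that sit outside this hunk (the query itself, session identifiers, and so on) omitted:

# Sketch of the Felo payload after this change; fields outside the
# hunk are omitted.
payload = {
    "lang": "",
    "agent_lang": "en",
    "search_options": {
        "langcode": "en-US",
        "search_image": True,
        "search_video": True,
    },
    "search_video": True,   # still present at the top level as well
    "model": "",
    "contexts_from": "google",
    "auto_routing": True,
}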
webscout/Provider/AISEARCH/scira_search.py
CHANGED

@@ -43,17 +43,32 @@ class Scira(AISearch):
     """

     AVAILABLE_MODELS = {
-        "scira-default": "
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
-        "scira-
+        "scira-default": "grok-3-mini",  # thinking model
+        "scira-x-fast-mini": "grok-3-mini-fast",
+        "scira-x-fast": "grok-3-fast",
+        "scira-nano": "gpt-4.1-nano",
+        "scira-grok-3": "grok-3",
+        "scira-grok-4": "grok-4",
+        "scira-vision": "grok-2-vision-1212",
+        "scira-g2": "grok-2-latest",
+        "scira-4o-mini": "gpt-4o-mini",
+        "scira-o4-mini": "o4-mini-2025-04-16",
+        "scira-o3": "o3",
+        "scira-qwen-32b": "qwen/qwen3-32b",
+        "scira-qwen-30b": "qwen3-30b-a3b",
+        "scira-deepseek-v3": "deepseek-v3-0324",
+        "scira-haiku": "claude-3-5-haiku-20241022",
+        "scira-mistral": "mistral-small-latest",
+        "scira-google-lite": "gemini-2.5-flash-lite-preview-06-17",
+        "scira-google": "gemini-2.5-flash",
+        "scira-google-pro": "gemini-2.5-pro",
+        "scira-anthropic": "claude-sonnet-4-20250514",
+        "scira-anthropic-thinking": "claude-sonnet-4-20250514",
+        "scira-opus": "claude-4-opus-20250514",
+        "scira-opus-pro": "claude-4-opus-20250514",
+        "scira-llama-4": "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "scira-kimi-k2": "kimi-k2-instruct",
+        "kimi-k2-instruct": "scira-kimi-k2",
     }
     def __init__(
         self,
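The mapping translates Scira aliases to upstream model names; note the final entry, "kimi-k2-instruct": "scira-kimi-k2", which inverts that convention and reads like a reversed duplicate of the line above it. A minimal lookup sketch against the mapping shown above (the resolve_model helper is an assumption for the example, not provider code):

# Illustrative alias lookup against Scira.AVAILABLE_MODELS.
def resolve_model(alias: str) -> str:
    try:
        return Scira.AVAILABLE_MODELS[alias]
    except KeyError as e:
        raise ValueError(f"Unknown Scira alias: {alias}") from e

print(resolve_model("scira-default"))  # -> "grok-3-mini"
print(resolve_model("scira-google"))   # -> "gemini-2.5-flash"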
webscout/Provider/AISEARCH/stellar_search.py
CHANGED

@@ -53,7 +53,12 @@ class Stellar(AISearch):

     @staticmethod
     def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
-        """
+        """
+        Extracts content from the Stellar stream format with focused pattern matching.
+
+        Prioritizes the primary diff pattern to avoid duplication and focuses on
+        incremental content building from stellar.chatastra.ai streaming response.
+        """
         if isinstance(chunk, bytes):
             try:
                 chunk = chunk.decode('utf-8', errors='replace')

@@ -61,14 +66,54 @@ class Stellar(AISearch):
             return None
         if not isinstance(chunk, str):
             return None
-
-        pattern
-
-
-
-
+
+        # Primary pattern: Hex key diff format (most reliable for streaming)
+        # Matches: 16:{"diff":[0,"AI"],"next":"$@18"}
+        primary_pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"]*?)"\]'
+        primary_matches = re.findall(primary_pattern, chunk)
+
+        if primary_matches:
+            # Join the matches and clean up
+            extracted_text = ''.join(primary_matches)
+
+            # Handle escape sequences properly
+            extracted_text = extracted_text.replace('\\n', '\n')
+            extracted_text = extracted_text.replace('\\r', '\r')
+            extracted_text = extracted_text.replace('\\"', '"')
+            extracted_text = extracted_text.replace('\\t', '\t')
+            extracted_text = extracted_text.replace('\\/', '/')
+            extracted_text = extracted_text.replace('\\\\', '\\')
+
+            # Clean up markdown formatting
+            extracted_text = extracted_text.replace('\\*', '*')
+            extracted_text = extracted_text.replace('\\#', '#')
+            extracted_text = extracted_text.replace('\\[', '[')
+            extracted_text = extracted_text.replace('\\]', ']')
+            extracted_text = extracted_text.replace('\\(', '(')
+            extracted_text = extracted_text.replace('\\)', ')')
+
             return extracted_text if extracted_text.strip() else None
+
+        # # Fallback: Look for Ta24 content blocks (complete responses)
+        # if ':Ta24,' in chunk:
+        #     ta24_pattern = r':Ta24,([^}]*?)(?:\d+:|$)'
+        #     ta24_matches = re.findall(ta24_pattern, chunk)
+        #     if ta24_matches:
+        #         extracted_text = ''.join(ta24_matches)
+        #         # Basic cleanup
+        #         extracted_text = extracted_text.replace('\\n', '\n')
+        #         extracted_text = extracted_text.replace('\\"', '"')
+        #         return extracted_text.strip() if extracted_text.strip() else None
+
+        # # Secondary fallback: Direct diff patterns without hex prefix
+        # fallback_pattern = r'\{"diff":\[0,"([^"]*?)"\]'
+        # fallback_matches = re.findall(fallback_pattern, chunk)
+        # if fallback_matches:
+        #     extracted_text = ''.join(fallback_matches)
+        #     extracted_text = extracted_text.replace('\\n', '\n')
+        #     extracted_text = extracted_text.replace('\\"', '"')
+        #     return extracted_text if extracted_text.strip() else None
+
         return None

     def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
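The primary pattern can be sanity-checked in isolation against the stream format documented in the comment above; a quick standalone check (the sample chunk is the frame from that comment, extended with a second illustrative frame):

import re

# Pattern copied from _stellar_extractor above.
primary_pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"]*?)"\]'

# Two frames in the documented stream format.
chunk = '16:{"diff":[0,"AI"],"next":"$@18"}17:{"diff":[0," is"],"next":"$@19"}'
print(re.findall(primary_pattern, chunk))  # -> ['AI', ' is']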
webscout/Provider/Deepinfra.py
CHANGED

@@ -17,62 +17,72 @@ class DeepInfra(Provider):
     """

     AVAILABLE_MODELS = [
-
-        "
-        "
-        "deepseek-ai/DeepSeek-R1-
-        "
-        "
-        "
+        "anthropic/claude-4-opus",
+        "moonshotai/Kimi-K2-Instruct",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "
-        "
-        "
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-
-        "
-        "
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "
-        "
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Devstral-Small-2507",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]

     @staticmethod

@@ -84,6 +94,7 @@ class DeepInfra(Provider):

     def __init__(
         self,
+        api_key: Optional[str] = None,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,

@@ -107,21 +118,34 @@ class DeepInfra(Provider):
         self.agent = LitAgent()
         # Fingerprint generation might be less relevant with impersonate
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+        self.api = api_key
         # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "Origin": "https://deepinfra.com",
-            "Pragma": "no-cache",
-            "Referer": "https://deepinfra.com/",
-            "Sec-Fetch-Dest": "empty",
+            "Cache-Control": "no-cache",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
-            "X-Deepinfra-Source": "web-embed",
+            "X-Deepinfra-Source": "web-embed",
+            # Additional headers from LitAgent.generate_fingerprint
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+            "Forwarded": self.fingerprint.get("forwarded", ""),
+            "X-Forwarded-Proto": self.fingerprint.get("x-forwarded-proto", ""),
+            "X-Request-Id": self.fingerprint.get("x-request-id", ""),
         }
+        if self.api is not None:
+            self.headers["Authorization"] = f"Bearer {self.api}"

         # Initialize curl_cffi Session
         self.session = Session()

@@ -321,7 +345,7 @@ if __name__ == "__main__":

     for model in DeepInfra.AVAILABLE_MODELS:
         try:
-            test_ai = DeepInfra(model=model, timeout=60)
+            test_ai = DeepInfra(model=model, timeout=60,)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
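With the new constructor parameter, a key supplied at construction time is sent as a Bearer token on every request; anonymous use still works because api_key defaults to None. A short usage sketch (the key value is a placeholder):

# Anonymous, as before.
ai = DeepInfra(model="moonshotai/Kimi-K2-Instruct", timeout=60)

# Authenticated: adds "Authorization: Bearer <key>" to the request headers.
ai = DeepInfra(model="moonshotai/Kimi-K2-Instruct", api_key="YOUR_DEEPINFRA_KEY", timeout=60)
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)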
webscout/Provider/ExaChat.py
CHANGED

@@ -21,9 +21,9 @@ MODEL_CONFIGS = {
         "gemini-2.0-flash",
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-
+        "gemini-2.5-flash-lite-preview-06-17",
         "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash
+        "gemini-2.5-flash",


     ],

@@ -62,7 +62,9 @@ MODEL_CONFIGS = {
         "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
-            "llama-3.3-70b"
+            "llama-3.3-70b",
+            "llama-4-scout-17b-16e-instruct",
+            "qwen-3-32b"
         ],
     },
     "xai": {

@@ -88,9 +90,9 @@ class ExaChat(Provider):
         "gemini-2.0-flash",
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash
+        "gemini-2.5-flash",
+        "gemini-2.5-flash-lite-preview-06-17",

         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",

@@ -120,6 +122,8 @@ class ExaChat(Provider):
         # Cerebras Models
         "llama3.1-8b",
         "llama-3.3-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",

     ]
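Since each MODEL_CONFIGS entry pairs an ayle.chat endpoint with the models it serves, the two new Cerebras models route to the cerebras endpoint. An illustrative lookup against that structure (the endpoint_for helper is an assumption for the example, not provider code):

# Illustrative: map a model name to its ayle.chat endpoint.
def endpoint_for(model: str) -> str:
    for cfg in MODEL_CONFIGS.values():
        if model in cfg["models"]:
            return cfg["endpoint"]
    raise ValueError(f"No provider serves {model!r}")

print(endpoint_for("qwen-3-32b"))  # -> "https://ayle.chat/api/cerebras"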
webscout/Provider/Flowith.py
CHANGED

@@ -15,7 +15,7 @@ class Flowith(Provider):
     """
     A provider class for interacting with the Flowith API.
     """
-    AVAILABLE_MODELS = ["gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
+    AVAILABLE_MODELS = ["gpt-4.1-nano", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]

     def __init__(
         self,
webscout/Provider/FreeGemini.py
CHANGED

@@ -83,7 +83,7 @@ class FreeGemini(Provider):
         self.last_response = {}
         self.system_prompt = system_prompt  # Stored for consistency

-        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.
+        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"

         self.agent = LitAgent()
         self.headers = {

@@ -246,5 +246,5 @@ class FreeGemini(Provider):
 if __name__ == "__main__":
     # Example usage
     free_gemini = FreeGemini()
-    response = free_gemini.chat("
+    response = free_gemini.chat("how many r in strawberry", stream=False)
     print(response)  # Should print the response from the API
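The updated endpoint is a server-sent-events stream for gemini-2.5-flash. A raw-request sketch, assuming the standard Google Generative Language payload shape (an assumption; the FreeGemini class above wraps all of this for you):

import requests

url = ("https://free-gemini.vercel.app/api/google/v1beta/models/"
       "gemini-2.5-flash:streamGenerateContent?alt=sse")
# Payload shape assumed from the upstream Google API this proxy fronts.
body = {"contents": [{"parts": [{"text": "how many r in strawberry"}]}]}
with requests.post(url, json=body, stream=True) as resp:
    for line in resp.iter_lines():
        if line.startswith(b"data: "):
            print(line[len(b"data: "):].decode("utf-8"))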
webscout/Provider/Gemini.py
CHANGED

@@ -10,22 +10,15 @@ from ..Bard import Chatbot, Model

 warnings.simplefilter("ignore", category=UserWarning)

-# Define model aliases for easy usage
+# Define model aliases for easy usage (only supported models)
 MODEL_ALIASES: Dict[str, Model] = {
     "unspecified": Model.UNSPECIFIED,
-    "gemini-2.0-flash": Model.G_2_0_FLASH,
-    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
-    "gemini-2.5-pro": Model.G_2_5_PRO,
-    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
     "gemini-2.5-flash": Model.G_2_5_FLASH,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
     # Add shorter aliases for convenience
-    "flash": Model.G_2_0_FLASH,
     "flash-2.5": Model.G_2_5_FLASH,
-    "thinking": Model.G_2_0_FLASH_THINKING,
     "pro": Model.G_2_5_PRO,
-    "
-    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+    "unspecified": Model.UNSPECIFIED,
 }

 # List of available models (friendly names)
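Lookups against the trimmed alias table now return None for the retired 2.0-era names; note also that the resulting dict literal defines "unspecified" twice, with the later entry silently winning. A quick sketch:

from webscout.Provider.Gemini import MODEL_ALIASES

MODEL_ALIASES.get("flash-2.5")  # -> Model.G_2_5_FLASH
MODEL_ALIASES.get("pro")        # -> Model.G_2_5_PRO
MODEL_ALIASES.get("thinking")   # -> None (alias removed in 8.3.5)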
webscout/Provider/GeminiProxy.py
CHANGED

@@ -14,10 +14,11 @@ class GeminiProxy(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash",
-        "gemini-2.5-pro-preview-06-05",
-        "gemini-2.5-pro-preview-05-06",
         "gemini-2.5-flash-preview-04-17",
         "gemini-2.5-flash-preview-05-20",
+        "gemini-2.5-flash-lite-preview-06-17",
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",

     ]

@@ -135,6 +136,31 @@ class GeminiProxy(Provider):
         return str(response)

 if __name__ == "__main__":
-
-
-    print(
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(GeminiProxy.AVAILABLE_MODELS)
+
+    for model in GeminiProxy.AVAILABLE_MODELS:
+        try:
+            test_ai = GeminiProxy(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")