webscout 8.3.4__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIutel.py +52 -1016
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +7 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +0 -2
- webscout/Provider/OPENAI/deepinfra.py +6 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -7
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +11 -7
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +0 -6
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -149
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/RECORD +49 -51
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/__init__.py
CHANGED
@@ -1,10 +1,11 @@
-from .stellar_search import *
-from .felo_search import *
-from .DeepFind import *
-from .genspark_search import *
-from .monica_search import *
-from .webpilotai_search import *
-from .hika_search import *
-from .scira_search import *
-from .iask_search import *
-from .Perplexity import *
+from .stellar_search import *
+from .felo_search import *
+from .DeepFind import *
+from .genspark_search import *
+from .monica_search import *
+from .webpilotai_search import *
+from .hika_search import *
+from .scira_search import *
+from .iask_search import *
+from .Perplexity import *
+# from .PERPLEXED_search import *
webscout/Provider/AISEARCH/felo_search.py
CHANGED
@@ -69,7 +69,7 @@ class Felo(AISearch):
     "accept-encoding": "gzip, deflate, br, zstd",
     "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
     "content-type": "application/json",
-    "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
+    # "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
     "dnt": "1",
     "origin": "https://felo.ai",
     "referer": "https://felo.ai/",
@@ -141,10 +141,14 @@ class Felo(AISearch):
     "lang": "",
     "agent_lang": "en",
     "search_options": {
-        "langcode": "en-US"
+        "langcode": "en-US",
+        "search_image": True,
+        "search_video": True,
     },
     "search_video": True,
-    "
+    "model": "",
+    "contexts_from": "google",
+    "auto_routing": True,
 }

 def for_stream():
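For reference, the request body assembled after this hunk looks roughly like the sketch below (fields reconstructed from the +/- lines above; the "query" key and its value are assumptions, not shown in the hunk):

# Sketch of the Felo payload after this change; "query" is an assumed field.
payload = {
    "query": "example prompt",      # assumption: not visible in the hunk
    "lang": "",
    "agent_lang": "en",
    "search_options": {
        "langcode": "en-US",
        "search_image": True,
        "search_video": True,
    },
    "search_video": True,
    "model": "",
    "contexts_from": "google",
    "auto_routing": True,
}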
webscout/Provider/AISEARCH/scira_search.py
CHANGED
@@ -67,6 +67,8 @@ class Scira(AISearch):
     "scira-opus": "claude-4-opus-20250514",
     "scira-opus-pro": "claude-4-opus-20250514",
     "scira-llama-4": "meta-llama/llama-4-maverick-17b-128e-instruct",
+    "scira-kimi-k2": "kimi-k2-instruct",
+    "kimi-k2-instruct": "scira-kimi-k2",
 }
 def __init__(
     self,
webscout/Provider/AISEARCH/stellar_search.py
CHANGED
@@ -53,7 +53,12 @@ class Stellar(AISearch):

     @staticmethod
     def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
-        """
+        """
+        Extracts content from the Stellar stream format with focused pattern matching.
+
+        Prioritizes the primary diff pattern to avoid duplication and focuses on
+        incremental content building from stellar.chatastra.ai streaming response.
+        """
         if isinstance(chunk, bytes):
             try:
                 chunk = chunk.decode('utf-8', errors='replace')
@@ -61,14 +66,54 @@ class Stellar(AISearch):
                 return None
         if not isinstance(chunk, str):
             return None
-
-        pattern
-
-
-
-
-
+
+        # Primary pattern: Hex key diff format (most reliable for streaming)
+        # Matches: 16:{"diff":[0,"AI"],"next":"$@18"}
+        primary_pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"]*?)"\]'
+        primary_matches = re.findall(primary_pattern, chunk)
+
+        if primary_matches:
+            # Join the matches and clean up
+            extracted_text = ''.join(primary_matches)
+
+            # Handle escape sequences properly
+            extracted_text = extracted_text.replace('\\n', '\n')
+            extracted_text = extracted_text.replace('\\r', '\r')
+            extracted_text = extracted_text.replace('\\"', '"')
+            extracted_text = extracted_text.replace('\\t', '\t')
+            extracted_text = extracted_text.replace('\\/', '/')
+            extracted_text = extracted_text.replace('\\\\', '\\')
+
+            # Clean up markdown formatting
+            extracted_text = extracted_text.replace('\\*', '*')
+            extracted_text = extracted_text.replace('\\#', '#')
+            extracted_text = extracted_text.replace('\\[', '[')
+            extracted_text = extracted_text.replace('\\]', ']')
+            extracted_text = extracted_text.replace('\\(', '(')
+            extracted_text = extracted_text.replace('\\)', ')')
+
             return extracted_text if extracted_text.strip() else None
+
+        # # Fallback: Look for Ta24 content blocks (complete responses)
+        # if ':Ta24,' in chunk:
+        #     ta24_pattern = r':Ta24,([^}]*?)(?:\d+:|$)'
+        #     ta24_matches = re.findall(ta24_pattern, chunk)
+        #     if ta24_matches:
+        #         extracted_text = ''.join(ta24_matches)
+        #         # Basic cleanup
+        #         extracted_text = extracted_text.replace('\\n', '\n')
+        #         extracted_text = extracted_text.replace('\\"', '"')
+        #         return extracted_text.strip() if extracted_text.strip() else None
+
+        # # Secondary fallback: Direct diff patterns without hex prefix
+        # fallback_pattern = r'\{"diff":\[0,"([^"]*?)"\]'
+        # fallback_matches = re.findall(fallback_pattern, chunk)
+        # if fallback_matches:
+        #     extracted_text = ''.join(fallback_matches)
+        #     extracted_text = extracted_text.replace('\\n', '\n')
+        #     extracted_text = extracted_text.replace('\\"', '"')
+        #     return extracted_text if extracted_text.strip() else None
+
         return None

     def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
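As a quick check on what the new primary pattern captures, here is a minimal standalone sketch (the sample chunk string is invented for illustration; the regex is copied from the + lines above):

import re

# Invented sample of the stellar.chatastra.ai stream format described above.
chunk = '16:{"diff":[0,"Hello "],"next":"$@18"}17:{"diff":[0,"world"],"next":"$@19"}'

# Regex copied from the hunk: hex key, then a {"diff":[0,"..."]} payload.
primary_pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"]*?)"\]'
print(''.join(re.findall(primary_pattern, chunk)))  # -> Hello world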
webscout/Provider/Deepinfra.py
CHANGED
@@ -18,8 +18,13 @@ class DeepInfra(Provider):

     AVAILABLE_MODELS = [
         "anthropic/claude-4-opus",
+        "moonshotai/Kimi-K2-Instruct",
         "anthropic/claude-4-sonnet",
         "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "Qwen/Qwen3-235B-A22B",
         "Qwen/Qwen3-30B-A3B",
         "Qwen/Qwen3-32B",
@@ -71,6 +76,7 @@ class DeepInfra(Provider):
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         "microsoft/WizardLM-2-8x22B",
         "mistralai/Devstral-Small-2505",
+        "mistralai/Devstral-Small-2507",
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
@@ -339,7 +345,7 @@ if __name__ == "__main__":

     for model in DeepInfra.AVAILABLE_MODELS:
         try:
-            test_ai = DeepInfra(model=model, timeout=60,
+            test_ai = DeepInfra(model=model, timeout=60,)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
webscout/Provider/OPENAI/TogetherAI.py
CHANGED
@@ -199,68 +199,77 @@ class Chat(BaseChat):
     self.completions = Completions(client)


-class TogetherAI(OpenAICompatibleProvider):
-"""
-OpenAI-compatible client for TogetherAI API.
-"""
 class TogetherAI(OpenAICompatibleProvider):
     """
     OpenAI-compatible client for TogetherAI API.
     """
     AVAILABLE_MODELS = [
-        "
-        "
-        "Qwen/Qwen2-72B-Instruct",
-        "Qwen/Qwen2-VL-72B-Instruct",
-        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "togethercomputer/MoA-1",
         "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "togethercomputer/MoA-1-Turbo",
+        "eddiehou/meta-llama/Llama-3.1-405B",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "Qwen/Qwen2.5-VL-72B-Instruct",
-        "
-        "
-        "
-        "
-        "
+        "arcee-ai/AFM-4.5B-Preview",
+        "lgai/exaone-3-5-32b-instruct",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "google/gemma-2-27b-it",
+        "Qwen/Qwen2-72B-Instruct",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "Qwen/Qwen2-VL-72B-Instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+        "meta-llama/Llama-Vision-Free",
+        "perplexity-ai/r1-1776",
+        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
         "arcee-ai/maestro-reasoning",
-        "
-        "
+        "togethercomputer/Refuel-Llm-V2-Small",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "arcee-ai/coder-large",
+        "Qwen/QwQ-32B",
         "arcee_ai/arcee-spotlight",
-        "
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-        "deepseek-ai/DeepSeek-V3",
-        "google/gemma-2-27b-it",
-        "lgai/exaone-3-5-32b-instruct",
-        "lgai/exaone-deep-32b",
+        "deepseek-ai/DeepSeek-R1-0528-tput",
         "marin-community/marin-8b-instruct",
-        "
-        "
-        "
-        "
-        "
-        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "lgai/exaone-deep-32b",
+        "google/gemma-3-27b-it",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "mistralai/Mistral-7B-Instruct-v0.1",
         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "
-        "
-        "
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
+        "togethercomputer/Refuel-Llm-V2",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
-        "meta-llama/
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "deepseek-ai/DeepSeek-V3",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        "Qwen/Qwen3-32B-FP8",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "arcee-ai/virtuoso-large",
+        "google/gemma-3n-E4B-it",
+        "moonshotai/Kimi-K2-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "
-        "
-        "
-        "
-        "mistralai/
-        "
-        "
-        "
-        "
-        "togethercomputer/Refuel-Llm-V2-Small",
+        "deepseek-ai/DeepSeek-R1",
+        "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+        "Rrrr/nim/nvidia/llama-3.3-nemotron-super-49b-v1-de6a6453",
+        "Rrrr/mistralai/Devstral-Small-2505-306f5881",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Rrrr/ChatGPT-5",
+        "Rrrr/MeowGPT-3.5",
+        "blackbox/meta-llama-3-1-8b"
     ]

     def __init__(self, browser: str = "chrome"):
webscout/Provider/OPENAI/TwoAI.py
CHANGED
@@ -5,6 +5,9 @@ import time
 import uuid
 import re
 import urllib.parse
+import os
+import pickle
+import tempfile
 from typing import List, Dict, Optional, Union, Generator, Any

 from webscout.Extra.tempmail import get_random_email
@@ -208,6 +211,96 @@ class TwoAI(OpenAICompatibleProvider):
     """OpenAI-compatible client for the TwoAI API."""

     AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]
+
+    # Class-level cache for API keys
+    _api_key_cache = None
+    _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_openai_cache.pkl")
+
+    @classmethod
+    def _load_cached_api_key(cls) -> Optional[str]:
+        """Load cached API key from file."""
+        try:
+            if os.path.exists(cls._cache_file):
+                with open(cls._cache_file, 'rb') as f:
+                    cache_data = pickle.load(f)
+                    # Check if cache is not too old (24 hours)
+                    if time.time() - cache_data.get('timestamp', 0) < 86400:
+                        return cache_data.get('api_key')
+        except Exception:
+            # If cache is corrupted or unreadable, ignore and regenerate
+            pass
+        return None
+
+    @classmethod
+    def _save_cached_api_key(cls, api_key: str):
+        """Save API key to cache file."""
+        try:
+            cache_data = {
+                'api_key': api_key,
+                'timestamp': time.time()
+            }
+            with open(cls._cache_file, 'wb') as f:
+                pickle.dump(cache_data, f)
+        except Exception:
+            # If caching fails, continue without caching
+            pass
+
+    @classmethod
+    def _validate_api_key(cls, api_key: str) -> bool:
+        """Validate if an API key is still working."""
+        try:
+            session = Session()
+            headers = {
+                'User-Agent': LitAgent().random(),
+                'Accept': 'application/json',
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {api_key}',
+            }
+
+            # Test with a simple request
+            test_payload = {
+                "messages": [{"role": "user", "content": "test"}],
+                "model": "sutra-v2",
+                "max_tokens": 1,
+                "stream": False
+            }
+
+            response = session.post(
+                "https://api.two.ai/v2/chat/completions",
+                headers=headers,
+                json=test_payload,
+                timeout=10,
+                impersonate="chrome120"
+            )
+
+            # If we get a 200 or 400 (bad request but auth worked), key is valid
+            # If we get 401/403, key is invalid
+            return response.status_code not in [401, 403]
+        except Exception:
+            # If validation fails, assume key is invalid
+            return False
+
+    @classmethod
+    def get_cached_api_key(cls) -> str:
+        """Get a cached API key or generate a new one if needed."""
+        # First check class-level cache
+        if cls._api_key_cache:
+            if cls._validate_api_key(cls._api_key_cache):
+                return cls._api_key_cache
+            else:
+                cls._api_key_cache = None
+
+        # Then check file cache
+        cached_key = cls._load_cached_api_key()
+        if cached_key and cls._validate_api_key(cached_key):
+            cls._api_key_cache = cached_key
+            return cached_key
+
+        # Generate new key if no valid cached key
+        new_key = cls.generate_api_key()
+        cls._api_key_cache = new_key
+        cls._save_cached_api_key(new_key)
+        return new_key

     @staticmethod
     def generate_api_key() -> str:
@@ -302,7 +395,7 @@ class TwoAI(OpenAICompatibleProvider):
         return api_key

     def __init__(self, browser: str = "chrome"):
-        api_key = self.
+        api_key = self.get_cached_api_key()
         self.timeout = 30
         self.base_url = "https://api.two.ai/v2/chat/completions"
         self.api_key = api_key
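Taken together, the new class members give TwoAI a three-tier key lookup: in-memory attribute, pickle file in the temp directory (24-hour expiry), then fresh generation. A minimal usage sketch, assuming the module path mirrors the file layout above (both calls touch the live two.ai API, so this needs network access):

from webscout.Provider.OPENAI.TwoAI import TwoAI  # import path assumed from the file list

# First call validates/loads any cached key, generating one only if needed;
# __init__ now routes through the same helper (see the -/+ pair above).
key = TwoAI.get_cached_api_key()

client = TwoAI()            # picks up the cached key automatically
print(TwoAI._cache_file)    # pickle cache under tempfile.gettempdir()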
webscout/Provider/OPENAI/__init__.py
CHANGED
@@ -6,7 +6,6 @@ from .x0gpt import *
 from .wisecat import *
 from .venice import *
 from .exaai import *
-from .typegpt import *
 from .scirachat import *
 from .llmchatco import *
 from .yep import * # Add YEPCHAT
@@ -17,7 +16,6 @@ from .netwrck import *
 from .standardinput import *
 from .writecream import *
 from .toolbaz import *
-from .uncovrAI import *
 from .opkfc import *
 from .chatgpt import *
 from .textpollinations import *
webscout/Provider/OPENAI/deepinfra.py
CHANGED
@@ -196,8 +196,13 @@ class Chat(BaseChat):
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
         "anthropic/claude-4-opus",
+        "moonshotai/Kimi-K2-Instruct",
         "anthropic/claude-4-sonnet",
         "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "Qwen/Qwen3-235B-A22B",
         "Qwen/Qwen3-30B-A3B",
         "Qwen/Qwen3-32B",
@@ -249,6 +254,7 @@ class DeepInfra(OpenAICompatibleProvider):
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         "microsoft/WizardLM-2-8x22B",
         "mistralai/Devstral-Small-2505",
+        "mistralai/Devstral-Small-2507",
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
webscout/Provider/OPENAI/scirachat.py
CHANGED
@@ -347,6 +347,8 @@ class SciraChat(OpenAICompatibleProvider):
     "claude-4-opus-20250514": "scira-opus",
     "claude-4-opus-20250514-pro": "scira-opus-pro",
     "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
+    "kimi-k2-instruct": "scira-kimi-k2",
+    "scira-kimi-k2": "kimi-k2-instruct",
 }
 # Reverse mapping: Scira format to actual model names
 SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
@@ -360,6 +362,8 @@ class SciraChat(OpenAICompatibleProvider):
 SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
 SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
 SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
+SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
 MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
 # Available models list (actual model names + scira aliases)
 AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
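The net effect of the new entries is a bidirectional alias table, so either spelling resolves to its counterpart. A standalone sketch with the dicts reduced to the kimi-k2 entries from this diff (the real mappings hold many more models; class scaffolding omitted):

# Reduced to the entries added in this diff.
MODEL_MAPPING = {"kimi-k2-instruct": "scira-kimi-k2"}
SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"

# Either name resolves, and both spellings end up in AVAILABLE_MODELS.
assert SCIRA_TO_MODEL["scira-kimi-k2"] == "kimi-k2-instruct"
assert MODEL_MAPPING["kimi-k2-instruct"] == "scira-kimi-k2"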
webscout/Provider/OPENAI/textpollinations.py
CHANGED
@@ -276,20 +276,24 @@ class TextPollinations(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
-        "openai",
-        "openai-fast",
-        "openai-large",
-        "openai-reasoning",
-        "openai-roblox",
-        "openai-audio",
         "deepseek",
         "deepseek-reasoning",
+        "gemma-roblox",
         "grok",
+        "llama-fast-roblox",
+        "llama-roblox",
         "llamascout",
         "mistral",
+        "mistral-nemo-roblox",
+        "mistral-roblox",
+        "openai",
+        "openai-audio",
+        "openai-fast",
+        "openai-large",
+        "openai-reasoning",
+        "openai-roblox",
         "phi",
         "qwen-coder",
-        "searchgpt",
         "bidara",
         "elixposearch",
         "evil",