webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/scira_chat.py
CHANGED
@@ -17,19 +17,78 @@ class SciraAI(Provider):
     A class to interact with the Scira AI chat API.
     """
 
-
-
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+    # Model mapping: actual model names to Scira API format
+    MODEL_MAPPING = {
+        "grok-3-mini": "scira-default",
+        "grok-3-mini-fast": "scira-x-fast-mini",
+        "grok-3-fast": "scira-x-fast",
+        "gpt-4.1-nano": "scira-nano",
+        "grok-3": "scira-grok-3",
+        "grok-4": "scira-grok-4",
+        "grok-2-vision-1212": "scira-vision",
+        "grok-2-latest": "scira-g2",
+        "gpt-4o-mini": "scira-4o-mini",
+        "o4-mini-2025-04-16": "scira-o4-mini",
+        "o3": "scira-o3",
+        "qwen/qwen3-32b": "scira-qwen-32b",
+        "qwen3-30b-a3b": "scira-qwen-30b",
+        "deepseek-v3-0324": "scira-deepseek-v3",
+        "claude-3-5-haiku-20241022": "scira-haiku",
+        "mistral-small-latest": "scira-mistral",
+        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
+        "gemini-2.5-flash": "scira-google",
+        "gemini-2.5-pro": "scira-google-pro",
+        "claude-sonnet-4-20250514": "scira-anthropic",
+        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
+        "claude-4-opus-20250514": "scira-opus",
+        "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
+        "kimi-k2-instruct": "scira-kimi-k2",
+        "scira-kimi-k2": "kimi-k2-instruct",
     }
+
+    # Reverse mapping: Scira format to actual model names
+    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    # Add special cases for aliases and duplicate mappings
+    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
+    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
+    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
+    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
+    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
+    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
+    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
+    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
+    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
+    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
+    # Available models list (actual model names + scira aliases)
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
+
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model name to its Scira API format.
+
+        Args:
+            model: Either an actual model name or a Scira alias
+
+        Returns:
+            The Scira API format model name
+
+        Raises:
+            ValueError: If the model is not supported
+        """
+        # If it's already a Scira format, return as-is
+        if model in cls.SCIRA_TO_MODEL:
+            return model
+
+        # If it's an actual model name, convert to Scira format
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+
+        # Model not found
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
 
     def __init__(
         self,
@@ -42,7 +101,7 @@ class SciraAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "grok-3-mini",
         chat_id: str = None,
         user_id: str = None,
         browser: str = "chrome",
@@ -67,9 +126,9 @@ class SciraAI(Provider):
             system_prompt (str): System prompt for the AI.
 
         """
-
-
-
+        # Resolve the model to Scira format
+        self.model = self._resolve_model(model)
+
         self.url = "https://scira.ai/api/search"
 
         # Initialize LitAgent for user agent generation
@@ -103,7 +162,6 @@
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
         self.chat_id = chat_id or str(uuid.uuid4())
         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
 
@@ -357,7 +415,43 @@
         return response.get("text", "")
 
 if __name__ == "__main__":
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(SciraAI.AVAILABLE_MODELS)
+
+    for model in SciraAI.AVAILABLE_MODELS:
+        try:
+            test_ai = SciraAI(model=model, timeout=60)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
+                response_text += chunk
+                # Optional: print chunks as they arrive for visual feedback
+                # print(chunk, end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip()  # Already decoded in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed, but stream test covers basic functionality
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+
webscout/Provider/toolbaz.py
CHANGED
@@ -37,8 +37,9 @@ class Toolbaz(Provider):
         "Qwen2.5-72B",
         "grok-2-1212",
         "grok-3-beta",
-        "toolbaz_v3.5_pro",
         "toolbaz_v3",
+        "toolbaz_v3.5_pro",
+        "toolbaz_v4",
         "mixtral_8x22b",
         "L3-70B-Euryale-v2.1",
         "midnight-rose",
@@ -112,12 +113,6 @@
         )
         self.conversation.history_offset = history_offset
 
-    @staticmethod
-    def _toolbaz_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Removes [model:...] tags from a string chunk."""
-        if isinstance(chunk, str):
-            return re.sub(r"\[model:.*?\]", "", chunk)
-        return None
 
     def random_string(self, length):
        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
@@ -217,14 +212,14 @@
 
            streaming_text = ""
 
-            # Use sanitize_stream with
+            # Use sanitize_stream with skip_regexes to remove [model:...] tags
             # It will decode bytes and yield processed string chunks
             processed_stream = sanitize_stream(
                 data=resp.iter_content(chunk_size=None),  # Pass byte iterator
                 intro_value=None,  # No simple prefix
                 to_json=False,  # Content is text
-
-                yield_raw_on_error=True,  # Yield even if
+                skip_regexes=[r"\[model:.*?\]"],  # Skip [model:...] tags
+                yield_raw_on_error=True,  # Yield even if regex processing fails
                 raw=raw
             )
 
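The bespoke _toolbaz_extractor static method is deleted; its regex moves into sanitize_stream's skip_regexes parameter. The skip semantics themselves live inside webscout's sanitize_stream and are not visible here, so the stdlib-only sketch below just demonstrates what the pattern r"\[model:.*?\]" matches, the same substitution the deleted helper performed:

import re

# Pattern carried over from the deleted _toolbaz_extractor.
MODEL_TAG = re.compile(r"\[model:.*?\]")

chunk = "[model:toolbaz_v4]Hello from Toolbaz"
print(MODEL_TAG.sub("", chunk))       # -> "Hello from Toolbaz"
print(bool(MODEL_TAG.search(chunk)))  # -> True: a chunk skip_regexes would catch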
webscout/Provider/typefully.py
CHANGED
@@ -71,16 +71,6 @@ class TypefullyAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
-    @staticmethod
-    def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        if isinstance(chunk, str):
-            if isinstance(chunk, bytes):
-                chunk = chunk.decode('utf-8', errors='replace')
-            match = re.search(r'0:"(.*?)"', chunk)
-            if match:
-                content = match.group(1).encode().decode('unicode_escape')
-                return content.replace('\\', '\\').replace('\\"', '"')
-        return None
 
     def ask(
         self,
@@ -125,7 +115,7 @@
             data=response.iter_content(chunk_size=None),
             intro_value=None,
             to_json=False,
-
+            extract_regexes=[r'0:"(.*?)"'],
             raw=raw
         )
         for content_chunk in processed_stream: