webscout 7.5-py3-none-any.whl → 7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +29 -11
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/DeepSeek.py +25 -17
- webscout/Provider/Deepinfra.py +115 -48
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +23 -7
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Netwrck.py +42 -19
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
- webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +25 -8
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +64 -37
- webscout/Provider/__init__.py +0 -6
- webscout/Provider/akashgpt.py +20 -5
- webscout/Provider/flowith.py +20 -5
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +30 -8
- webscout/Provider/multichat.py +65 -9
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +1 -3
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
- webscout/Extra/autollama.py +0 -231
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/ChatGPTGratis.py CHANGED

@@ -5,13 +5,12 @@ import json
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
 
 
 class ChatGPTGratis(Provider):
     """
-    A class to interact with the chatgptgratis.eu backend API with
+    A class to interact with the chatgptgratis.eu backend API with real-time streaming.
     """
     AVAILABLE_MODELS = [
         "Meta-Llama-3.2-1B-Instruct",
@@ -20,14 +19,12 @@ class ChatGPTGratis(Provider):
         "Meta-Llama-3.1-70B-Instruct",
         "Meta-Llama-3.1-405B-Instruct",
         "gpt4o"
-
     ]
 
     def __init__(
         self,
-        model: str = "
+        model: str = "Meta-Llama-3.2-1B-Instruct",
         timeout: int = 30,
-        logging: bool = False,
         proxies: Optional[Dict[str, str]] = None,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
@@ -41,14 +38,6 @@
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.logger = Logger(
-            name="ChatGPTGratis",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
-
         self.session = requests.Session()
         self.timeout = timeout
         self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
@@ -78,9 +67,6 @@
         )
         self.conversation.history_offset = history_offset
 
-        if self.logger:
-            self.logger.info("ChatGPTGratis initialized successfully.")
-
     def ask(
         self,
         prompt: str,
@@ -93,10 +79,6 @@
         Sends a request to the API and returns the response.
         If stream is True, yields response chunks as they are received.
         """
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             available_opts = (
@@ -107,22 +89,15 @@
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise Exception(f"Optimizer is not one of {list(available_opts)}")
 
         payload = {
             "message": conversation_prompt,
             "model": self.model,
-
         }
 
         def for_stream() -> Generator[Dict[str, Any], None, None]:
-            if self.logger:
-                self.logger.debug("Initiating streaming request to API")
            response = self.session.post(
                self.api_endpoint,
                json=payload,
@@ -130,23 +105,15 @@
                timeout=self.timeout
            )
            if not response.ok:
-                if self.logger:
-                    self.logger.error(
-                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
-                    )
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
-            if self.logger:
-                self.logger.info(f"API connection established. Status: {response.status_code}")
 
            full_response = ""
            for line in response.iter_lines():
                if line:
                    line_decoded = line.decode('utf-8').strip()
                    if line_decoded == "data: [DONE]":
-                        if self.logger:
-                            self.logger.debug("Stream completed.")
                        break
                    if line_decoded.startswith("data: "):
                        try:
@@ -158,18 +125,12 @@
                                content = ""
                            full_response += content
                            yield content if raw else {"text": content}
-                        except json.JSONDecodeError as e:
-                            if self.logger:
-                                self.logger.error(f"JSON parsing error: {str(e)}")
+                        except json.JSONDecodeError:
                            continue
            # Update last response and conversation history.
            self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
-            if self.logger:
-                self.logger.debug("Response processing completed.")
 
        def for_non_stream() -> Dict[str, Any]:
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
            collected = ""
            for chunk in for_stream():
                collected += chunk["text"] if isinstance(chunk, dict) else chunk
@@ -188,9 +149,6 @@
         Returns the response as a string.
         For streaming requests, yields each response chunk as a string.
         """
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
        def stream_response() -> Generator[str, None, None]:
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -213,14 +171,24 @@
 
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in ChatGPTGratis.AVAILABLE_MODELS:
+        try:
+            test_ai = ChatGPTGratis(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
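The rewritten `__main__` block above follows a pattern 7.6 applies across many providers: a smoke test that instantiates the class once per entry in `AVAILABLE_MODELS` and prints a pass/fail table. A condensed sketch of the same pattern, assuming webscout 7.6 is installed and that `ChatGPTGratis` is re-exported from `webscout.Provider` (the exact import path is an assumption, not shown in this diff):

    # Minimal smoke-test sketch mirroring the 7.6 __main__ harness.
    # Assumption: ChatGPTGratis is importable from webscout.Provider.
    from webscout.Provider import ChatGPTGratis

    for model in ChatGPTGratis.AVAILABLE_MODELS:
        try:
            ai = ChatGPTGratis(model=model, timeout=60)
            reply = ai.chat("Say 'Hello' in one word")  # non-streaming chat() returns a str
            status = "✓" if reply.strip() else "✗"
            print(f"{model:<50} {status:<10} {reply.strip()[:50]}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {e}")

Note that the harness counts any non-empty string as a pass; it checks reachability, not answer quality.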
webscout/Provider/DeepSeek.py CHANGED

@@ -13,11 +13,11 @@ class DeepSeek(Provider):
     A class to interact with the DeepSeek AI API.
     """
 
-    AVAILABLE_MODELS =
-    "deepseek-v3"
-    "deepseek-r1"
-    "deepseek-llm-67b-chat"
-
+    AVAILABLE_MODELS = [
+        "deepseek-v3",
+        "deepseek-r1",
+        "deepseek-llm-67b-chat"
+    ]
 
     def __init__(
         self,
@@ -175,15 +175,23 @@
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepSeek.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepSeek(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
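The `AVAILABLE_MODELS` fix here is worth pausing on. As rendered by the diff viewer the old block is truncated, but the shape of the bug is a classic one: string literals listed without commas are implicitly concatenated by Python, so a malformed list either fails to parse or silently fuses adjacent model names into a single entry. A small illustration of the pitfall (standard Python behavior, not webscout code):

    # Adjacent string literals concatenate implicitly:
    broken = [
        "deepseek-v3"
        "deepseek-r1"  # missing comma fuses the two names into one element
    ]
    assert broken == ["deepseek-v3deepseek-r1"]

    fixed = ["deepseek-v3", "deepseek-r1", "deepseek-llm-67b-chat"]
    assert len(fixed) == 3

The 7.6 version declares a proper bracketed, comma-separated list, so model validation in `__init__` behaves as intended.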
webscout/Provider/Deepinfra.py CHANGED

@@ -16,46 +16,46 @@ class DeepInfra(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "anthropic/claude-3-7-sonnet-latest",
+        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
+        # "google/gemma-2-27b-it", # >>>> NOT WORKING
+        # "google/gemma-2-9b-it", # >>>> NOT WORKING
         "google/gemma-3-27b-it",
-        "google/gemini-1.5-flash",
-        "google/gemini-1.5-flash-8b",
-        "google/gemini-2.0-flash-001",
-        "Gryphe/MythoMax-L2-13b",
-        "meta-llama/Llama-3.2-1B-Instruct",
-        "meta-llama/Llama-3.2-3B-Instruct",
+        # "google/gemini-1.5-flash", # >>>> NOT WORKING
+        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
         "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "meta-llama/Llama-3.2-11B-Vision-Instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct",
-        "meta-llama/Meta-Llama-3-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "mistralai/Mistral-Nemo-Instruct-2407",
+        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "NousResearch/Hermes-3-Llama-3.1-405B",
-        "NovaSky-AI/Sky-T1-32B-Preview",
+        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
         "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-7B-Instruct",
+        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
         "Qwen/Qwen2.5-72B-Instruct",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Sao10K/L3.1-70B-Euryale-v2.2",
-        "Sao10K/L3.3-70B-Euryale-v2.3",
+        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
         "meta-llama/Llama-3.3-70B-Instruct",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     ]
@@ -71,32 +71,41 @@
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
+        model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        browser: str = "chrome"
     ):
         """Initializes the DeepInfra API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
-
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
         self.headers = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "X-Deepinfra-Source": "web-embed",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
         }
+
         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
@@ -125,6 +134,31 @@
         )
         self.conversation.history_offset = history_offset
 
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
     def ask(
         self,
         prompt: str,
@@ -180,15 +214,30 @@
                         except json.JSONDecodeError:
                             continue
 
+                self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)
 
             except requests.RequestException as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
 
         def for_non_stream():
-
-
-
+            try:
+                response = requests.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}"
+                    )
+
+                response_data = response.json()
+                if 'choices' in response_data and len(response_data['choices']) > 0:
+                    content = response_data['choices'][0].get('message', {}).get('content', '')
+                    self.last_response = {"text": content}
+                    self.conversation.update_chat_history(prompt, content)
+                    return {"text": content}
+                else:
+                    raise exceptions.FailedToGenerateResponseError("No response content found")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         return for_stream() if stream else for_non_stream()
 
@@ -198,7 +247,7 @@
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]:
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
@@ -213,8 +262,26 @@
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepInfra.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepInfra(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
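The fingerprint plumbing is the substantive change in this file: request headers are now derived from a LitAgent-generated browser fingerprint, and the new `refresh_identity()` regenerates that fingerprint and re-syncs both `self.headers` and the live `requests.Session`. A usage sketch, assuming `DeepInfra` is importable from `webscout.Provider` and relying only on the fingerprint keys visible in the diff (`accept`, `accept_language`, `sec_ch_ua`, `platform`, `user_agent`, `browser_type`):

    from webscout.Provider import DeepInfra  # assumed export path

    ai = DeepInfra(browser="chrome")      # fingerprint generated at construction
    print(ai.headers["User-Agent"])       # headers mirror the generated identity

    # Rotate to a fresh identity, e.g. after a rate limit or block.
    # refresh_identity() returns the new fingerprint dict.
    fp = ai.refresh_identity(browser="firefox")
    assert ai.session.headers["User-Agent"] == fp["user_agent"]

    # chat() may now return a generator: its signature changed to
    # Union[str, Generator[str, None, None]].
    for chunk in ai.chat("Say 'Hello' in one word", stream=True):
        print(chunk, end="", flush=True)

The `for_non_stream` rewrite also brings the non-streaming path in line with the streaming one: both now set `self.last_response` and raise `FailedToGenerateResponseError` on any failure.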
webscout/Provider/Gemini.py CHANGED

@@ -10,7 +10,7 @@ from ..AIbase import Provider, AsyncProvider
 from ..Bard import Chatbot, Model
 
 # Import Logger and related classes (assumed similar to what is in yep.py)
-from webscout import Logger, LogFormat
+from webscout.Litlogger import Logger, LogFormat
 
 warnings.simplefilter("ignore", category=UserWarning)
 
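The Gemini change is a one-line import correction: `Logger` and `LogFormat` live in the `webscout.Litlogger` subpackage, not in the top-level `webscout` namespace. A minimal sketch of the corrected import in use; the constructor arguments mirror the `Logger(name=..., format=LogFormat.MODERN_EMOJI)` call removed from ChatGPTGratis.py above, and the logger name here is hypothetical:

    from webscout.Litlogger import Logger, LogFormat  # 7.6 import path

    log = Logger(name="Gemini", format=LogFormat.MODERN_EMOJI)
    log.info("logger constructed via the corrected import")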
webscout/Provider/Glider.py CHANGED

@@ -12,12 +12,12 @@ class GliderAI(Provider):
     A class to interact with the Glider.so API.
     """
 
-    AVAILABLE_MODELS =
+    AVAILABLE_MODELS = [
         "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "deepseek-ai/DeepSeek-R1",
-
+    ]
 
     def __init__(
         self,
@@ -180,9 +180,26 @@
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-for
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in GliderAI.AVAILABLE_MODELS:
+        try:
+            test_ai = GliderAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")

webscout/Provider/HF_space/qwen_qwen2.py (inferred from the file-list order and the +2 -2 count; the file header was lost in this rendering) CHANGED

@@ -1,10 +1,10 @@
 from dataclasses import dataclass
-from enum import Enum
+from enum import Enum
 import requests
 import json
 import re
 import uuid
-from typing import List, Dict, Generator, Optional, Any, TypedDict,
+from typing import List, Dict, Generator, Optional, Any, TypedDict, Final
 
 # Type definitions
 class Role(Enum):
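The second hunk above completes a previously truncated `typing` import by adding `Final`. `Final` is a standard-library typing construct, not anything webscout-specific: it marks names that type checkers should treat as non-reassignable constants. A minimal sketch (the constant name and value are hypothetical):

    from typing import Final

    API_HOST: Final = "qwen-qwen2-72b-instruct.hf.space"  # hypothetical constant
    # mypy/pyright will flag any later reassignment of API_HOST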
webscout/Provider/HeckAI.py CHANGED

@@ -208,10 +208,26 @@
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            test_ai = HeckAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Jadve.py CHANGED

@@ -244,8 +244,23 @@ class JadveOpenAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in JadveOpenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = JadveOpenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")