webscout-7.5-py3-none-any.whl → webscout-7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +29 -11
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/DeepSeek.py +25 -17
- webscout/Provider/Deepinfra.py +115 -48
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +23 -7
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Netwrck.py +42 -19
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
- webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +25 -8
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +64 -37
- webscout/Provider/__init__.py +0 -6
- webscout/Provider/akashgpt.py +20 -5
- webscout/Provider/flowith.py +20 -5
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +30 -8
- webscout/Provider/multichat.py +65 -9
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +1 -3
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
- webscout/Extra/autollama.py +0 -231
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/TextPollinationsAI.py
CHANGED

@@ -4,11 +4,11 @@ from typing import Any, Dict, Generator
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
+
 class TextPollinationsAI(Provider):
     """
-    A class to interact with the Pollinations AI API
+    A class to interact with the Pollinations AI API.
     """
 
     AVAILABLE_MODELS = [
@@ -23,21 +23,21 @@ class TextPollinationsAI(Provider):
         "rtist",  # Rtist image generator
         "searchgpt",  # SearchGPT with realtime search
         "evil",  # Evil Mode - Experimental
-        "deepseek",  # DeepSeek-V3
+        # "deepseek",  # DeepSeek-V3 >>>> NOT WORKING
         "claude-hybridspace",  # Claude Hybridspace
         "deepseek-r1",  # DeepSeek-R1 Distill Qwen 32B
-        "deepseek-reasoner",  # DeepSeek R1 - Full
-        "llamalight",  # Llama 3.1 8B Instruct
-        "llamaguard",  # Llamaguard 7B AWQ
+        # "deepseek-reasoner",  # DeepSeek R1 - Full >>>> NOT WORKING
+        # "llamalight",  # Llama 3.1 8B Instruct >>>> NOT WORKING
+        # "llamaguard",  # Llamaguard 7B AWQ >>>> NOT WORKING
         "gemini",  # Gemini 2.0 Flash
         "gemini-thinking",  # Gemini 2.0 Flash Thinking
         "hormoz",  # Hormoz 8b
         "hypnosis-tracy",  # Hypnosis Tracy
         "sur",  # Sur AI Assistant
         "sur-mistral",  # Sur AI Assistant (Mistral)
-        "llama-scaleway",  # Llama (Scaleway)
+        # "llama-scaleway",  # Llama (Scaleway) >>>> NOT WORKING
         "phi",  # Phi model
-        "openai-audio"  # OpenAI Audio model
+        # "openai-audio"  # OpenAI Audio model >>>> NOT WORKING
     ]
 
     def __init__(
@@ -53,20 +53,11 @@ class TextPollinationsAI(Provider):
         act: str = None,
         model: str = "openai-large",
         system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False
     ):
-        """Initializes the TextPollinationsAI API client
+        """Initializes the TextPollinationsAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.logger = Logger(
-            name="TextPollinationsAI",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing TextPollinationsAI with model: {model}")
-
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -105,9 +96,6 @@ class TextPollinationsAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
-        if self.logger:
-            self.logger.info("TextPollinationsAI initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -116,22 +104,14 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
-        """Chat with AI
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
+        """Chat with AI"""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
@@ -144,9 +124,6 @@ class TextPollinationsAI(Provider):
         }
 
         def for_stream():
-            if self.logger:
-                self.logger.debug("Initiating streaming request to API")
-
             response = self.session.post(
                 self.api_endpoint,
                 headers=self.headers,
@@ -156,22 +133,15 @@ class TextPollinationsAI(Provider):
             )
 
             if not response.ok:
-                if self.logger:
-                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-            if self.logger:
-                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
             full_response = ""
             for line in response.iter_lines():
                 if line:
                     line = line.decode('utf-8').strip()
                     if line == "data: [DONE]":
-                        if self.logger:
-                            self.logger.debug("Stream completed")
                         break
                     if line.startswith('data: '):
                         try:
@@ -184,9 +154,7 @@ class TextPollinationsAI(Provider):
                                 content = ""
                             full_response += content
                             yield content if raw else dict(text=content)
-                        except json.JSONDecodeError
-                            if self.logger:
-                                self.logger.error(f"JSON parsing error: {str(e)}")
+                        except json.JSONDecodeError:
                             continue
 
             self.last_response.update(dict(text=full_response))
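The streaming loop that survives the logger cleanup is a plain OpenAI-style server-sent-events reader: each JSON chunk arrives on a line prefixed with "data: ", a literal "data: [DONE]" ends the stream, and lines that fail to parse are skipped. A minimal standalone sketch of that loop, assuming the usual chat-completions delta shape; the URL, payload, and the stream_sse_text name are illustrative, not webscout API:

import json
import requests

def stream_sse_text(url, payload, headers=None):
    """Hypothetical helper: yield text chunks from an OpenAI-style 'data: {...}' stream."""
    with requests.post(url, json=payload, headers=headers, stream=True, timeout=60) as response:
        response.raise_for_status()
        for raw_line in response.iter_lines():
            if not raw_line:
                continue  # skip keep-alive blank lines
            line = raw_line.decode("utf-8").strip()
            if line == "data: [DONE]":
                break  # explicit end-of-stream sentinel
            if not line.startswith("data: "):
                continue
            try:
                chunk = json.loads(line[len("data: "):])
            except json.JSONDecodeError:
                continue  # tolerate malformed or partial frames, as the provider does
            # Assumed delta shape, mirroring the OpenAI chat-completions stream format.
            content = (chunk.get("choices") or [{}])[0].get("delta", {}).get("content")
            if content:
                yield content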
@@ -194,12 +162,7 @@ class TextPollinationsAI(Provider):
                 prompt, self.get_message(self.last_response)
             )
 
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -213,10 +176,7 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response as a string
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
+        """Generate response as a string"""
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -241,10 +201,30 @@ class TextPollinationsAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(TextPollinationsAI.AVAILABLE_MODELS)
+
+    for model in TextPollinationsAI.AVAILABLE_MODELS:
+        try:
+            test_ai = TextPollinationsAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
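The new __main__ block is a per-model smoke test, and the same block recurs nearly verbatim in Venice.py and WiseCat.py, and in a non-streaming variant in akashgpt.py and flowith.py below. A sketch of how the repeated block could be factored into one reusable helper; smoke_test is a hypothetical name, not part of webscout:

def smoke_test(provider_cls, stream=True, timeout=60):
    """Hypothetical helper: run the repeated per-model check for any provider class."""
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)
    for model in provider_cls.AVAILABLE_MODELS:
        try:
            ai = provider_cls(model=model, timeout=timeout)
            reply = ai.chat("Say 'Hello' in one word", stream=stream)
            # Streaming providers yield chunks; non-streaming ones return a full string.
            text = ("".join(reply) if stream else reply).strip()
            if text:
                display = text[:50] + "..." if len(text) > 50 else text
                print(f"{model:<50} {'✓':<10} {display}")
            else:
                print(f"{model:<50} {'✗':<10} Empty or invalid response")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {e}")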
webscout/Provider/Venice.py
CHANGED

@@ -188,13 +188,30 @@ class Venice(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
 
-    #
-
+    # Test all available models
+    working = 0
+    total = len(Venice.AVAILABLE_MODELS)
 
-
-
-
-
-
+    for model in Venice.AVAILABLE_MODELS:
+        try:
+            test_ai = Venice(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/WiseCat.py
CHANGED

@@ -169,8 +169,30 @@ class WiseCat(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(WiseCat.AVAILABLE_MODELS)
+
+    for model in WiseCat.AVAILABLE_MODELS:
+        try:
+            test_ai = WiseCat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Youchat.py
CHANGED

@@ -2,6 +2,7 @@ from uuid import uuid4
 from re import findall
 import json
 
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -11,6 +12,7 @@ from typing import Any, AsyncGenerator, Dict
 
 import cloudscraper
 
+
 class YouChat(Provider):
     """
     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
@@ -18,31 +20,36 @@ class YouChat(Provider):
 
     # Updated available models based on provided "aiModels" list
     AVAILABLE_MODELS = [
-        "
-        "
-        "
-        "
-        "
+        # "gpt_4_5_preview", #isProOnly": true,
+        # "openai_o3_mini_high", #isProOnly": true,
+        # "openai_o3_mini_medium", #isProOnly": true,
+        # "openai_o1", #isProOnly": true,
+        # "openai_o1_preview", #isProOnly": true,
+        # "openai_o1_mini", #isProOnly": true,
         "gpt_4o_mini",
         "gpt_4o",
         "gpt_4_turbo",
-        "gpt_4",
-        "
-        "
-        "
+        # "gpt_4", #isProOnly": true,
+        # "claude_3_7_sonnet_thinking", #isProOnly": true,
+        # "claude_3_7_sonnet", #isProOnly": true,
+        # "claude_3_5_sonnet", #isProOnly": true,
+        # "claude_3_opus", #isProOnly": true,
         "claude_3_sonnet",
         "claude_3_5_haiku",
-        "
-        "
-        "
-        "
+        # "qwq_32b", #isProOnly": true,
+        "qwen2p5_72b",
+        "qwen2p5_coder_32b",
+        # "deepseek_r1", #isProOnly": true,
+        # "deepseek_v3", #isProOnly": true,
+        "grok_2",
+        # "llama3_3_70b", #isProOnly": false, "isAllowedForUserChatModes": false,
+        # "llama3_2_90b", #isProOnly": false, "isAllowedForUserChatModes": false,
         "llama3_1_405b",
         "mistral_large_2",
+        "gemini_2_flash",
         "gemini_1_5_flash",
         "gemini_1_5_pro",
         "databricks_dbrx_instruct",
-        "qwen2p5_72b",
-        "qwen2p5_coder_32b",
         "command_r_plus",
         "solar_1_mini",
         "dolphin_2_5"
@@ -59,7 +66,7 @@ class YouChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "gemini_2_flash",
     ):
         """Instantiates YouChat
 
@@ -157,30 +164,44 @@ class YouChat(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
-
-
+        trace_id = str(uuid4())
+        conversation_turn_id = str(uuid4())
+
+        # Updated query parameters to match the new API format
+        params = {
             "page": 1,
             "count": 10,
             "safeSearch": "Moderate",
             "mkt": "en-IN",
-            "
+            "enable_worklow_generation_ux": "true",
             "domain": "youchat",
-            "use_personalization_extraction": "
-            "
-            "
-            "
-            "conversationTurnId": str(uuid4()),
+            "use_personalization_extraction": "true",
+            "queryTraceId": trace_id,
+            "chatId": trace_id,
+            "conversationTurnId": conversation_turn_id,
             "pastChatLength": 0,
-            "
-            "
-            "
-            "traceId":
+            "selectedChatMode": "custom",
+            "selectedAiModel": self.model,
+            "enable_agent_clarification_questions": "true",
+            "traceId": f"{trace_id}|{conversation_turn_id}|{uuid4()}",
+            "use_nested_youchat_updates": "true"
+        }
+
+        # New payload format is JSON
+        payload = {
+            "query": conversation_prompt,
             "chat": "[]"
         }
 
         def for_stream():
-            response = self.session.
-                self.chat_endpoint,
+            response = self.session.post(
+                self.chat_endpoint,
+                headers=self.headers,
+                cookies=self.cookies,
+                params=params,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=self.timeout
             )
             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
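The rewritten params block derives every request ID from two uuid4() calls: queryTraceId and chatId share one value, conversationTurnId gets its own, and traceId is the pipe-delimited combination of both plus a third fresh UUID. The composition in isolation; build_trace_params is an illustrative name, not part of the provider:

from uuid import uuid4

def build_trace_params():
    """Illustrative only: reproduce the ID scheme used by the updated YouChat params."""
    trace_id = str(uuid4())
    conversation_turn_id = str(uuid4())
    return {
        "queryTraceId": trace_id,
        "chatId": trace_id,  # the chat shares the query trace ID
        "conversationTurnId": conversation_turn_id,
        # traceId stitches the pieces together: <chat>|<turn>|<fresh uuid>
        "traceId": f"{trace_id}|{conversation_turn_id}|{uuid4()}",
    }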
@@ -188,6 +209,8 @@ class YouChat(Provider):
             )
 
             streaming_text = ""
+            found_marker = False  # Flag to track if we've passed the '####' marker
+
             for value in response.iter_lines(
                 decode_unicode=True,
                 chunk_size=self.stream_chunk_size,
@@ -197,11 +220,19 @@ class YouChat(Provider):
                     if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
                         data = json.loads(value[6:])
                         token = data.get('youChatToken', '')
-
+
+                        # Check if this is the marker with '####'
+                        if token == '####':
+                            found_marker = True
+                            continue  # Skip the marker itself
+
+                        # Only process tokens after the marker has been found
+                        if found_marker and token:
                             streaming_text += token
                             yield token if raw else dict(text=token)
                 except json.decoder.JSONDecodeError:
                     pass
+
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
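The found_marker flag added above is a simple gate: every youChatToken that arrives before the literal '####' sentinel is discarded, the sentinel itself is skipped, and only the tokens after it are yielded. The same gating in isolation; the token source here is a stand-in iterable, not the real HTTP response:

def tokens_after_marker(tokens, marker="####"):
    """Yield only the tokens that arrive after the sentinel marker."""
    found_marker = False
    for token in tokens:
        if token == marker:
            found_marker = True  # everything before this point is preamble
            continue  # never emit the marker itself
        if found_marker and token:
            yield token

# Preamble tokens are dropped; answer tokens pass through.
assert list(tokens_after_marker(["meta", "####", "Hel", "lo"])) == ["Hel", "lo"]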
@@ -252,10 +283,6 @@ class YouChat(Provider):
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
@@ -264,6 +291,6 @@ class YouChat(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = YouChat(timeout=5000)
-    response = ai.chat(
+    response = ai.chat("hi", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/__init__.py
CHANGED

@@ -23,7 +23,6 @@ from .Llama3 import *
 from .DARKAI import *
 from .koala import *
 from .meta import *
-from .DiscordRocks import *
 from .julius import *
 from .Youchat import *
 from .yep import *
@@ -39,14 +38,12 @@ from .cerebras import *
 from .lepton import *
 from .geminiapi import *
 from .elmo import *
-from .Bing import *
 from .GPTWeb import *
 from .Netwrck import Netwrck
 from .llamatutor import *
 from .promptrefine import *
 from .tutorai import *
 from .ChatGPTES import *
-from .Amigo import *
 from .bagoodex import *
 from .aimathgpt import *
 from .gaurish import *
@@ -123,7 +120,6 @@ __all__ = [
     'KOALA',
     'Meta',
     'AskMyAI',
-    'DiscordRocks',
     'PiAI',
     'Julius',
     'YouChat',
@@ -141,14 +137,12 @@ __all__ = [
     'Cleeai',
     'Elmo',
     'Free2GPT',
-    'Bing',
     'GPTWeb',
     'Netwrck',
     'LlamaTutor',
     'PromptRefine',
     'TutorAI',
     'ChatGPTES',
-    'AmigoChat',
     'Bagoodex',
     'AIMathGPT',
     'GaurishCerebras',
webscout/Provider/akashgpt.py
CHANGED

@@ -308,8 +308,23 @@ class AkashGPT(Provider):
         return response.get("text", "")
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in AkashGPT.AVAILABLE_MODELS:
+        try:
+            test_ai = AkashGPT(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/flowith.py
CHANGED

@@ -174,8 +174,23 @@ class Flowith(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Flowith.AVAILABLE_MODELS:
+        try:
+            test_ai = Flowith(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")