webscout-7.5-py3-none-any.whl → webscout-7.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +193 -199
- webscout/Extra/autocoder/rawdog.py +789 -677
- webscout/Extra/gguf.py +682 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -22
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +28 -30
- webscout/Provider/C4ai.py +29 -11
- webscout/Provider/ChatGPTClone.py +226 -0
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/DeepSeek.py +25 -17
- webscout/Provider/Deepinfra.py +115 -48
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Glider.py +33 -12
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +23 -7
- webscout/Provider/Hunyuan.py +272 -0
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/LambdaChat.py +391 -0
- webscout/Provider/Netwrck.py +42 -19
- webscout/Provider/OLLAMA.py +256 -32
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
- webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
- webscout/Provider/TTI/artbit/async_artbit.py +3 -32
- webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/piclumen/__init__.py +22 -22
- webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +25 -8
- webscout/Provider/WebSim.py +227 -0
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +64 -37
- webscout/Provider/__init__.py +12 -7
- webscout/Provider/akashgpt.py +20 -5
- webscout/Provider/flowith.py +33 -7
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/koala.py +20 -5
- webscout/Provider/labyrinth.py +239 -0
- webscout/Provider/learnfastai.py +28 -15
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +30 -8
- webscout/Provider/multichat.py +65 -9
- webscout/Provider/sonus.py +208 -0
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +155 -65
- webscout/Provider/uncovr.py +297 -0
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +53 -40
- webscout/conversation.py +1 -10
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +356 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +1 -3
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
- webscout/Extra/autollama.py +0 -231
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/C4ai.py
CHANGED
@@ -16,7 +16,15 @@ class C4ai(Provider):
     A class to interact with the Hugging Face Chat API.
     """
     # Default available models
-    AVAILABLE_MODELS = [
+    AVAILABLE_MODELS = [
+        'command-a-03-2025',
+        'command-r-plus-08-2024',
+        'command-r-08-2024',
+        'command-r-plus',
+        'command-r',
+        'command-r7b-12-2024',
+        'command-r7b-arabic-02-2025'
+    ]  # Placeholder for available models, It will be updated in the constructor
 
     def __repr__(self) -> str:
         return f"C4ai({self.model})"
@@ -402,13 +410,23 @@
         return response.get("text", "")
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in C4ai.AVAILABLE_MODELS:
+        try:
+            test_ai = C4ai(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
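Aside: the new __main__ block above smoke-tests every entry in the expanded model list. A minimal standalone use of the provider, assuming webscout 7.7 is installed and using only the calls visible in this diff (the model/timeout kwargs, and chat() returning plain text), might look like:

    # Sketch only; mirrors the constructor and chat() calls from the smoke test above.
    from webscout.Provider.C4ai import C4ai

    ai = C4ai(model="command-r-plus", timeout=60)  # any entry from the new AVAILABLE_MODELS
    print(ai.chat("Say 'Hello' in one word"))      # chat() returns the response text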
webscout/Provider/ChatGPTClone.py
ADDED
@@ -0,0 +1,226 @@
+import time
+import uuid
+import cloudscraper
+import json
+import re
+from typing import Any, Dict, Optional, Generator, Union
+from dataclasses import dataclass, asdict
+from datetime import date
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import WEBS, exceptions
+from webscout.litagent import LitAgent
+
+class ChatGPTClone(Provider):
+    """
+    ChatGPTClone is a provider class for interacting with the ChatGPT Clone API.
+    Supports streaming responses.
+    """
+
+    url = "https://chatgpt-clone-ten-nu.vercel.app"
+    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2000,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4",
+        temperature: float = 0.6,
+        top_p: float = 0.7,
+        browser: str = "chrome",
+        system_prompt: str = "You are a helpful assistant."
+    ):
+        """Initialize the ChatGPT Clone client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.model = model
+        self.session = cloudscraper.create_scraper()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.temperature = temperature
+        self.top_p = top_p
+        self.system_prompt = system_prompt
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": self.url,
+            "Referer": f"{self.url}/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Create session cookies with unique identifiers
+        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method))
+            and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+        # Set consistent headers for the scraper session
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+    def refresh_identity(self, browser: str = None):
+        """Refreshes the browser identity fingerprint."""
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        # Generate new cookies
+        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """Send a message to the ChatGPT Clone API"""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(f"{self.url}/api/chat", headers=self.headers, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout) as response:
+                    if not response.ok:
+                        # If we get a non-200 response, try refreshing our identity once
+                        if response.status_code in [403, 429]:
+                            self.refresh_identity()
+                            # Retry with new identity
+                            with self.session.post(f"{self.url}/api/chat", headers=self.headers, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout) as retry_response:
+                                if not retry_response.ok:
+                                    raise exceptions.FailedToGenerateResponseError(
+                                        f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                                    )
+                                response = retry_response
+                        else:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                            )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            match = re.search(r'0:"(.*?)"', line)
+                            if match:
+                                content = match.group(1)
+                                streaming_text += content
+                                yield content if raw else dict(text=content)
+
+                    self.last_response.update(dict(text=streaming_text))
+                    self.conversation.update_chat_history(prompt, streaming_text)
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate a response to a prompt"""
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Extract message text from response"""
+        assert isinstance(response, dict)
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = ChatGPTClone(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
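Note on the new provider's stream handling: for_stream() pulls text out of each line with re.search(r'0:"(.*?)"', line), i.e. it extracts the quoted payload from lines shaped like 0:"chunk" (the framing used by Vercel AI SDK data streams, consistent with the vercel.app host). A self-contained sketch of just that parsing step, using made-up sample lines:

    import re

    # Hypothetical stream lines in the 0:"..." framing that ChatGPTClone.ask() parses.
    sample_lines = ['0:"Hello"', '0:", world"', 'e:{"finishReason":"stop"}']

    streaming_text = ""
    for line in sample_lines:
        match = re.search(r'0:"(.*?)"', line)  # same pattern as in the diff above
        if match:
            streaming_text += match.group(1)

    print(streaming_text)  # -> Hello, world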
webscout/Provider/ChatGPTGratis.py
CHANGED
@@ -5,13 +5,12 @@ import json
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
 
 
 class ChatGPTGratis(Provider):
     """
-    A class to interact with the chatgptgratis.eu backend API with
+    A class to interact with the chatgptgratis.eu backend API with real-time streaming.
     """
     AVAILABLE_MODELS = [
         "Meta-Llama-3.2-1B-Instruct",
@@ -20,14 +19,12 @@ class ChatGPTGratis(Provider):
         "Meta-Llama-3.1-70B-Instruct",
         "Meta-Llama-3.1-405B-Instruct",
         "gpt4o"
-
     ]
 
     def __init__(
         self,
-        model: str = "
+        model: str = "Meta-Llama-3.2-1B-Instruct",
         timeout: int = 30,
-        logging: bool = False,
         proxies: Optional[Dict[str, str]] = None,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
@@ -41,14 +38,6 @@ class ChatGPTGratis(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.logger = Logger(
-            name="ChatGPTGratis",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
-
         self.session = requests.Session()
         self.timeout = timeout
         self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
@@ -78,9 +67,6 @@ class ChatGPTGratis(Provider):
         )
         self.conversation.history_offset = history_offset
 
-        if self.logger:
-            self.logger.info("ChatGPTGratis initialized successfully.")
-
     def ask(
         self,
         prompt: str,
@@ -93,10 +79,6 @@ class ChatGPTGratis(Provider):
         Sends a request to the API and returns the response.
         If stream is True, yields response chunks as they are received.
         """
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             available_opts = (
@@ -107,22 +89,15 @@ class ChatGPTGratis(Provider):
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {list(available_opts)}")
 
         payload = {
            "message": conversation_prompt,
            "model": self.model,
-
        }
 
        def for_stream() -> Generator[Dict[str, Any], None, None]:
-            if self.logger:
-                self.logger.debug("Initiating streaming request to API")
            response = self.session.post(
                self.api_endpoint,
                json=payload,
@@ -130,23 +105,15 @@ class ChatGPTGratis(Provider):
                timeout=self.timeout
            )
            if not response.ok:
-                if self.logger:
-                    self.logger.error(
-                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
-                    )
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
-            if self.logger:
-                self.logger.info(f"API connection established. Status: {response.status_code}")
 
            full_response = ""
            for line in response.iter_lines():
                if line:
                    line_decoded = line.decode('utf-8').strip()
                    if line_decoded == "data: [DONE]":
-                        if self.logger:
-                            self.logger.debug("Stream completed.")
                        break
                    if line_decoded.startswith("data: "):
                        try:
@@ -158,18 +125,12 @@ class ChatGPTGratis(Provider):
                                content = ""
                            full_response += content
                            yield content if raw else {"text": content}
-                        except json.JSONDecodeError as e:
-                            if self.logger:
-                                self.logger.error(f"JSON parsing error: {str(e)}")
+                        except json.JSONDecodeError:
                            continue
            # Update last response and conversation history.
            self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
-            if self.logger:
-                self.logger.debug("Response processing completed.")
 
        def for_non_stream() -> Dict[str, Any]:
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
            collected = ""
            for chunk in for_stream():
                collected += chunk["text"] if isinstance(chunk, dict) else chunk
@@ -188,9 +149,6 @@ class ChatGPTGratis(Provider):
        Returns the response as a string.
        For streaming requests, yields each response chunk as a string.
        """
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
        def stream_response() -> Generator[str, None, None]:
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -213,14 +171,24 @@
 
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in ChatGPTGratis.AVAILABLE_MODELS:
+        try:
+            test_ai = ChatGPTGratis(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
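The streaming loop kept in ChatGPTGratis is the usual server-sent-events pattern: decode each line, stop at "data: [DONE]", and JSON-parse whatever follows the "data: " prefix (the field that actually carries the text falls outside the hunks shown here). A minimal sketch of that framing, with an assumed "content" field:

    import json

    # Assumed SSE payloads; the real field name is not visible in this diff.
    raw_lines = [b'data: {"content": "Hi"}', b'data: {"content": " there"}', b'data: [DONE]']

    full_response = ""
    for line in raw_lines:
        decoded = line.decode("utf-8").strip()
        if decoded == "data: [DONE]":
            break
        if decoded.startswith("data: "):
            try:
                full_response += json.loads(decoded[6:]).get("content", "")
            except json.JSONDecodeError:
                continue

    print(full_response)  # -> Hi there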
webscout/Provider/DeepSeek.py
CHANGED
@@ -13,11 +13,11 @@ class DeepSeek(Provider):
     A class to interact with the DeepSeek AI API.
     """
 
-    AVAILABLE_MODELS =
-        "deepseek-v3"
-        "deepseek-r1"
-        "deepseek-llm-67b-chat"
-
+    AVAILABLE_MODELS = [
+        "deepseek-v3",
+        "deepseek-r1",
+        "deepseek-llm-67b-chat"
+    ]
 
     def __init__(
         self,
@@ -175,15 +175,23 @@
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in DeepSeek.AVAILABLE_MODELS:
+        try:
+            test_ai = DeepSeek(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
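The same __main__ smoke test now appears almost verbatim in C4ai, ChatGPTGratis, and DeepSeek. It is not factored out in the release, but the loop reduces naturally to one helper parameterized on the provider class; the following is an illustrative refactor, not code from the package:

    def smoke_test(provider_cls, timeout: int = 60) -> None:
        """Print the ✓/✗ model table the new __main__ blocks produce."""
        print("-" * 80)
        print(f"{'Model':<50} {'Status':<10} {'Response'}")
        print("-" * 80)
        for model in provider_cls.AVAILABLE_MODELS:
            try:
                text = provider_cls(model=model, timeout=timeout).chat("Say 'Hello' in one word")
                text = text.strip() if isinstance(text, str) else ""
                if text:
                    status = "✓"
                    display = text[:50] + "..." if len(text) > 50 else text
                else:
                    status, display = "✗", "Empty or invalid response"
            except Exception as e:
                status, display = "✗", str(e)
            print(f"{model:<50} {status:<10} {display}")

    # e.g. smoke_test(DeepSeek), assuming the class is imported as in the diffs above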