webscout 7.4-py3-none-any.whl → 7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +432 -0
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +27 -48
- webscout/Provider/Deepinfra.py +129 -53
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +38 -5
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +43 -67
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +217 -200
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +63 -36
- webscout/Provider/__init__.py +13 -8
- webscout/Provider/akashgpt.py +28 -10
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +196 -0
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/granite.py +17 -53
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +7 -47
- webscout/Provider/llmchat.py +36 -53
- webscout/Provider/multichat.py +92 -98
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/exceptions.py +19 -9
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +55 -95
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- webscout-7.6.dist-info/LICENSE.md +146 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
- webscout/Extra/autollama.py +0 -231
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- webscout/Provider/dgaf.py +0 -214
- webscout-7.4.dist-info/LICENSE.md +0 -211
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/HuggingFaceChat.py
ADDED
@@ -0,0 +1,462 @@
+import requests
+import uuid
+import json
+import time
+import random
+import re
+from typing import Any, Dict, List, Optional, Union, Generator
+
+from webscout.AIutel import Conversation
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+
+class HuggingFaceChat(Provider):
+    """
+    A class to interact with the Hugging Face Chat API.
+    Uses cookies for authentication and supports streaming responses.
+    """
+
+    # Available models (default models - will be updated dynamically)
+    AVAILABLE_MODELS = [
+        'meta-llama/Llama-3.3-70B-Instruct',
+        'Qwen/Qwen2.5-72B-Instruct',
+        'CohereForAI/c4ai-command-r-plus-08-2024',
+        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
+        'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+        'Qwen/QwQ-32B',
+        'Qwen/Qwen2.5-Coder-32B-Instruct',
+        'meta-llama/Llama-3.2-11B-Vision-Instruct',
+        'NousResearch/Hermes-3-Llama-3.1-8B',
+        'mistralai/Mistral-Nemo-Instruct-2407',
+        'microsoft/Phi-3.5-mini-instruct',
+        'meta-llama/Llama-3.1-8B-Instruct'
+
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2000,
+        timeout: int = 60,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        model: str = "Qwen/QwQ-32B",
+        cookie_path: str = "cookies.json",
+        assistantId: str = None,
+        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+    ):
+        """Initialize the HuggingFaceChat client."""
+        self.url = "https://huggingface.co/chat"
+        self.cookie_path = cookie_path
+        self.session = requests.Session()
+        self.session.proxies.update(proxies)
+        self.assistantId = assistantId
+        self.system_prompt = system_prompt
+        # Load cookies for authentication
+        self.cookies = self.load_cookies()
+
+        # Set up headers for all requests
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": LitAgent().random(),
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Origin": "https://huggingface.co",
+            "Referer": "https://huggingface.co/chat",
+            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": "\"Windows\"",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "DNT": "1",
+            "Priority": "u=1, i"
+        }
+
+        # Apply cookies to session
+        if self.cookies:
+            self.session.cookies.update(self.cookies)
+
+        # Update available models
+        self.update_available_models()
+
+        # Set default model if none provided
+        self.model = model
+
+        # Provider settings
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+
+        # Initialize a simplified conversation history for file saving only
+        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+
+        # Store conversation data for different models
+        self._conversation_data = {}
+
+    def update_available_models(self):
+        """Update the available models list from HuggingFace"""
+        try:
+            models = self.get_models()
+            if models and len(models) > 0:
+                self.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
+
+    @classmethod
+    def get_models(cls):
+        """Fetch available models from HuggingFace."""
+        try:
+            response = requests.get("https://huggingface.co/chat")
+            text = response.text
+            models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
+
+            if not models_match:
+                return cls.AVAILABLE_MODELS
+
+            models_text = models_match.group(1)
+            models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
+            models_text = models_text.replace('void 0', 'null')
+
+            def add_quotation_mark(match):
+                return f'{match.group(1)}"{match.group(2)}":'
+
+            models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
+
+            models_data = json.loads(models_text)
+            # print([model["id"] for model in models_data])
+            return [model["id"] for model in models_data]
+        except Exception:
+            return cls.AVAILABLE_MODELS
+
+    def load_cookies(self):
+        """Load cookies from a JSON file"""
+        try:
+            with open(self.cookie_path, 'r') as f:
+                cookies_data = json.load(f)
+
+            # Convert the cookie list to a dictionary format for requests
+            cookies = {}
+            for cookie in cookies_data:
+                # Only include cookies that are not expired and have a name and value
+                if 'name' in cookie and 'value' in cookie:
+                    # Check if the cookie hasn't expired
+                    if 'expirationDate' not in cookie or cookie['expirationDate'] > time.time():
+                        cookies[cookie['name']] = cookie['value']
+
+            return cookies
+        except Exception:
+            return {}
+
+    def create_conversation(self, model: str):
+        """Create a new conversation with the specified model."""
+        url = "https://huggingface.co/chat/conversation"
+        payload = {"model": model, "assistantId": self.assistantId, "preprompt": self.system_prompt}
+
+        # Update referer for this specific request
+        headers = self.headers.copy()
+        headers["Referer"] = f"https://huggingface.co/chat/models/{model}"
+
+        try:
+            response = self.session.post(url, json=payload, headers=headers)
+
+            if response.status_code == 401:
+                raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
+
+            # Handle other error codes
+            if response.status_code != 200:
+                return None
+
+            data = response.json()
+            conversation_id = data.get("conversationId")
+
+            # Store conversation data
+            if model not in self._conversation_data:
+                self._conversation_data[model] = {
+                    "conversationId": conversation_id,
+                    "messageId": str(uuid.uuid4())  # Initial message ID
+                }
+
+            # Update cookies if needed
+            if 'hf-chat' in response.cookies:
+                self.cookies["hf-chat"] = response.cookies['hf-chat']
+
+            return conversation_id
+        except requests.exceptions.RequestException:
+            return None
+
+    def fetch_message_id(self, conversation_id: str) -> str:
+        """Fetch the latest message ID for a conversation."""
+        try:
+            url = f"https://huggingface.co/chat/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
+            response = self.session.get(url, headers=self.headers)
+            response.raise_for_status()
+
+            # Parse the JSON data from the response
+            json_data = None
+            for line in response.text.split('\n'):
+                if line.strip():
+                    try:
+                        parsed = json.loads(line)
+                        if isinstance(parsed, dict) and "nodes" in parsed:
+                            json_data = parsed
+                            break
+                    except json.JSONDecodeError:
+                        continue
+
+            if not json_data:
+                # Fall back to a UUID if we can't parse the response
+                return str(uuid.uuid4())
+
+            # Extract message ID using the same pattern as in the example
+            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
+                return str(uuid.uuid4())
+
+            data = json_data["nodes"][1]["data"]
+            keys = data[data[0]["messages"]]
+            message_keys = data[keys[-1]]
+            message_id = data[message_keys["id"]]
+
+            return message_id
+
+        except Exception:
+            # Fall back to a UUID if there's an error
+            return str(uuid.uuid4())
+
+    def generate_boundary(self):
+        """Generate a random boundary for multipart/form-data requests"""
+        boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+        boundary = "----WebKitFormBoundary"
+        boundary += "".join(random.choice(boundary_chars) for _ in range(16))
+        return boundary
+
+    def process_response(self, response, prompt: str):
+        """Process streaming response and extract content."""
+        full_text = ""
+        sources = None
+        reasoning_text = ""
+        has_reasoning = False
+
+        for line in response.iter_lines(decode_unicode=True):
+            if not line:
+                continue
+
+            try:
+                # Parse each line as JSON
+                data = json.loads(line)
+
+                # Handle different response types
+                if "type" not in data:
+                    continue
+
+                if data["type"] == "stream" and "token" in data:
+                    token = data["token"].replace("\u0000", "")
+                    full_text += token
+                    resp = {"text": token}
+                    yield resp
+                elif data["type"] == "finalAnswer":
+                    final_text = data.get("text", "")
+                    if final_text and not full_text:
+                        full_text = final_text
+                        resp = {"text": final_text}
+                        yield resp
+                elif data["type"] == "webSearch" and "sources" in data:
+                    sources = data["sources"]
+                elif data["type"] == "reasoning":
+                    has_reasoning = True
+                    if data.get("subtype") == "stream" and "token" in data:
+                        reasoning_text += data["token"]
+                    # elif data.get("subtype") == "status":
+                    #     # For status updates in reasoning, we can just append them as a comment
+                    #     if data.get("status"):
+                    #         reasoning_text += f"\n# {data['status']}"
+
+                    # If we have reasoning, prepend it to the next text output
+                    if reasoning_text and not full_text:
+                        resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
+                        yield resp
+
+            except json.JSONDecodeError:
+                continue
+
+        # Update conversation history only for saving to file if needed
+        if full_text and self.conversation.file:
+            if has_reasoning:
+                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
+                self.last_response = {"text": full_text_with_reasoning}
+                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
+            else:
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+
+        return full_text
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        web_search: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """Send a message to the HuggingFace Chat API"""
+        model = self.model
+
+        # Check if we have a conversation for this model
+        if model not in self._conversation_data:
+            conversation_id = self.create_conversation(model)
+            if not conversation_id:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
+        else:
+            conversation_id = self._conversation_data[model]["conversationId"]
+            # Refresh message ID
+            self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
+
+        url = f"https://huggingface.co/chat/conversation/{conversation_id}"
+        message_id = self._conversation_data[model]["messageId"]
+
+        # Data to send - use the prompt directly without generating a complete prompt
+        # since HuggingFace maintains conversation state internally
+        request_data = {
+            "inputs": prompt,
+            "id": message_id,
+            "is_retry": False,
+            "is_continue": False,
+            "web_search": web_search,
+            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+        }
+
+        # Update headers for this specific request
+        headers = self.headers.copy()
+        headers["Referer"] = f"https://huggingface.co/chat/conversation/{conversation_id}"
+
+        # Create multipart form data
+        boundary = self.generate_boundary()
+        multipart_headers = headers.copy()
+        multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
+
+        # Serialize the data to JSON
+        data_json = json.dumps(request_data, separators=(',', ':'))
+
+        # Create the multipart form data body
+        body = f"--{boundary}\r\n"
+        body += f'Content-Disposition: form-data; name="data"\r\n'
+        body += f"Content-Type: application/json\r\n\r\n"
+        body += f"{data_json}\r\n"
+        body += f"--{boundary}--\r\n"
+
+        multipart_headers["Content-Length"] = str(len(body))
+
+        def for_stream():
+            try:
+                # Try with multipart/form-data first
+                response = None
+                try:
+                    response = self.session.post(
+                        url,
+                        data=body,
+                        headers=multipart_headers,
+                        stream=True,
+                        timeout=self.timeout
+                    )
+                except requests.exceptions.RequestException:
+                    pass
+
+                # If multipart fails or returns error, try with regular JSON
+                if not response or response.status_code != 200:
+                    response = self.session.post(
+                        url,
+                        json=request_data,
+                        headers=headers,
+                        stream=True,
+                        timeout=self.timeout
+                    )
+
+                # If both methods fail, raise exception
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                # Process the streaming response
+                yield from self.process_response(response, prompt)
+
+            except Exception as e:
+                if isinstance(e, requests.exceptions.RequestException):
+                    if hasattr(e, 'response') and e.response is not None:
+                        status_code = e.response.status_code
+                        if status_code == 401:
+                            raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
+
+                # Try another model if current one fails
+                if len(self.AVAILABLE_MODELS) > 1:
+                    current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
+                    next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
+                    self.model = self.AVAILABLE_MODELS[next_model_index]
+
+                    # Create new conversation with the alternate model
+                    conversation_id = self.create_conversation(self.model)
+                    if conversation_id:
+                        # Try again with the new model
+                        yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
+                                            conversationally=conversationally, web_search=web_search)
+                        return
+
+                # If we get here, all models failed
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            response_text = ""
+            for response in for_stream():
+                if "text" in response:
+                    response_text += response["text"]
+            self.last_response = {"text": response_text}
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        web_search: bool = False
+    ) -> Union[str, Generator]:
+        """Generate a response to a prompt"""
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Extract message text from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    # Simple test code
+    from rich import print
+
+    try:
+        ai = HuggingFaceChat(cookie_path="cookies.json", system_prompt="You are a helpful assistant. Please answer the following question.")
+        response = ai.chat("how many r in strawberry", stream=True, web_search=False)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+        print()
+    except Exception as e:
+        print(f"An error occurred: {e}")
webscout/Provider/Jadve.py
CHANGED
@@ -244,8 +244,23 @@ class JadveOpenAI(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-
-
-
-
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in JadveOpenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = JadveOpenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Marcus.py
CHANGED
@@ -7,13 +7,11 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
-from webscout import LitAgent as Lit
 
 class Marcus(Provider):
     """
     This class provides methods for interacting with the AskMarcus API.
-    Improved to match webscout provider standards
+    Improved to match webscout provider standards.
     """
 
     def __init__(
@@ -26,18 +24,9 @@ class Marcus(Provider):
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
-        act: str = None,
-        logging: bool = False
+        act: str = None
     ):
-        """Initializes the Marcus API"""
-        self.logger = Logger(
-            name="Marcus",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info("Initializing Marcus API")
-
+        """Initializes the Marcus API."""
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -50,7 +39,7 @@ class Marcus(Provider):
             'accept': '*/*',
             'origin': 'https://www.askmarcus.app',
             'referer': 'https://www.askmarcus.app/chat',
-            'user-agent':
+            'user-agent': 'Mozilla/5.0',
         }
 
         self.__available_optimizers = (
@@ -73,9 +62,6 @@ class Marcus(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        if self.logger:
-            self.logger.info("Marcus API initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -84,22 +70,14 @@ class Marcus(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[str, None, None]:
-        """Sends a prompt to the AskMarcus API and returns the response"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
+        """Sends a prompt to the AskMarcus API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
@@ -108,9 +86,6 @@ class Marcus(Provider):
 
         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
-
                 with requests.post(
                     self.api_endpoint,
                     headers=self.headers,
@@ -119,35 +94,21 @@ class Marcus(Provider):
                     timeout=self.timeout
                 ) as response:
                     response.raise_for_status()
-
-                    if self.logger:
-                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
                     for line in response.iter_lines():
                         if line:
                             yield line.decode('utf-8')
-
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )
 
             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
 
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-
             full_response = ""
             for line in for_stream():
                 full_response += line
             self.last_response = {"text": full_response}
-
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
@@ -159,10 +120,7 @@ class Marcus(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generates a response from the AskMarcus API"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
+        """Generates a response from the AskMarcus API."""
         def for_stream():
             for response_chunk in self.ask(
                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -184,8 +142,7 @@ class Marcus(Provider):
 
 if __name__ == "__main__":
     from rich import print
-
-    ai = Marcus(logging=True)
+    ai = Marcus()
     response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
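One caller-visible consequence of the Marcus changes above: the `logging` constructor argument (and the Litlogger wiring behind it) was removed, so code written against 7.4 needs a small update. A sketch, assuming only what the diff shows:

from webscout.Provider.Marcus import Marcus

# ai = Marcus(logging=True)  # accepted on 7.4; raises TypeError on 7.6
ai = Marcus()  # 7.6: construct without the removed flag
for chunk in ai.chat("Hello", stream=True):
    print(chunk, end="", flush=True)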