webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/glider.py (8.2.6 → 8.2.8; the two versions are identical apart from the `models` property added at the end of the file):

```diff
--- a/webscout/Provider/OPENAI/glider.py
+++ b/webscout/Provider/OPENAI/glider.py
@@ -1,316 +1,323 @@
 import requests
 import json
 import time
 import uuid
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )
 
 # Attempt to import LitAgent, fallback if not available
 try:
     from webscout.litagent import LitAgent
 except ImportError:
     # Define a dummy LitAgent if webscout is not installed or accessible
     class LitAgent:
         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
             # Return minimal default headers if LitAgent is unavailable
             print("Warning: LitAgent not found. Using default minimal headers.")
             return {
                 "accept": "*/*",
                 "accept_language": "en-US,en;q=0.9",
                 "platform": "Windows",
                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
                 "browser_type": browser,
             }
 
 # --- Glider Client ---
 
 class Completions(BaseCompletions):
     def __init__(self, client: 'Glider'):
         self._client = client
 
     def create(
         self,
         *,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: Optional[int] = 2049,
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Creates a model response for the given chat conversation.
         Mimics openai.chat.completions.create
         """
         # Prepare the payload for Glider API
         payload = {
             "messages": messages,
             "model": self._client.convert_model_name(model),
         }
 
         # Add optional parameters if provided
         if max_tokens is not None and max_tokens > 0:
             payload["max_tokens"] = max_tokens
 
         if temperature is not None:
             payload["temperature"] = temperature
 
         if top_p is not None:
             payload["top_p"] = top_p
 
         # Add any additional parameters
         payload.update(kwargs)
 
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
 
         if stream:
             return self._create_stream(request_id, created_time, model, payload)
         else:
             return self._create_non_stream(request_id, created_time, model, payload)
 
     def _create_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
                 timeout=self._client.timeout
             )
             response.raise_for_status()
 
             # Track token usage across chunks
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
 
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode('utf-8').strip()
 
                     if decoded_line.startswith("data: "):
                         json_str = decoded_line[6:]
                         if json_str == "[DONE]":
                             # Format the final [DONE] marker in OpenAI format
                             # print("data: [DONE]")
                             break
 
                         try:
                             data = json.loads(json_str)
                             choice_data = data.get('choices', [{}])[0]
                             delta_data = choice_data.get('delta', {})
                             finish_reason = choice_data.get('finish_reason')
 
                             # Update token counts if available
                             usage_data = data.get('usage', {})
                             if usage_data:
                                 prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                                 completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                                 total_tokens = usage_data.get('total_tokens', total_tokens)
 
                             # Create the delta object
                             delta = ChoiceDelta(
                                 content=delta_data.get('content'),
                                 role=delta_data.get('role'),
                                 tool_calls=delta_data.get('tool_calls')
                             )
 
                             # Create the choice object
                             choice = Choice(
                                 index=choice_data.get('index', 0),
                                 delta=delta,
                                 finish_reason=finish_reason,
                                 logprobs=choice_data.get('logprobs')
                             )
 
                             # Create the chunk object
                             chunk = ChatCompletionChunk(
                                 id=request_id,
                                 choices=[choice],
                                 created=created_time,
                                 model=model,
                                 system_fingerprint=data.get('system_fingerprint')
                             )
 
                             # Convert to dict for proper formatting
                             chunk_dict = chunk.to_dict()
 
                             # Add usage information to match OpenAI format
                             # Even if we don't have real token counts, include estimated usage
                             # This matches the format in the examples
                             usage_dict = {
                                 "prompt_tokens": prompt_tokens or 10,
                                 "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
                                 "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
                                 "estimated_cost": None
                             }
 
                             # Update completion_tokens and total_tokens as we receive more content
                             if delta_data.get('content'):
                                 completion_tokens += 1
                                 total_tokens = prompt_tokens + completion_tokens
                                 usage_dict["completion_tokens"] = completion_tokens
                                 usage_dict["total_tokens"] = total_tokens
 
                             chunk_dict["usage"] = usage_dict
 
                             # Format the response in OpenAI format exactly as requested
                             # We need to print the raw string and also yield the chunk object
                             # This ensures both the console output and the returned object are correct
                             # print(f"data: {json.dumps(chunk_dict)}")
 
                             # Return the chunk object for internal processing
                             yield chunk
                         except json.JSONDecodeError:
                             print(f"Warning: Could not decode JSON line: {json_str}")
                             continue
         except requests.exceptions.RequestException as e:
             print(f"Error during Glider stream request: {e}")
             raise IOError(f"Glider request failed: {e}") from e
         except Exception as e:
             print(f"Error processing Glider stream: {e}")
             raise
 
     def _create_non_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
                 timeout=self._client.timeout
             )
             response.raise_for_status()
             data = response.json()
 
             choices_data = data.get('choices', [])
             usage_data = data.get('usage', {})
 
             choices = []
             for choice_d in choices_data:
                 message_d = choice_d.get('message', {})
                 message = ChatCompletionMessage(
                     role=message_d.get('role', 'assistant'),
                     content=message_d.get('content', '')
                 )
                 choice = Choice(
                     index=choice_d.get('index', 0),
                     message=message,
                     finish_reason=choice_d.get('finish_reason', 'stop')
                 )
                 choices.append(choice)
 
             usage = CompletionUsage(
                 prompt_tokens=usage_data.get('prompt_tokens', 0),
                 completion_tokens=usage_data.get('completion_tokens', 0),
                 total_tokens=usage_data.get('total_tokens', 0)
             )
 
             completion = ChatCompletion(
                 id=request_id,
                 choices=choices,
                 created=created_time,
                 model=data.get('model', model),
                 usage=usage,
             )
             return completion
 
         except requests.exceptions.RequestException as e:
             print(f"Error during Glider non-stream request: {e}")
             raise IOError(f"Glider request failed: {e}") from e
         except Exception as e:
             print(f"Error processing Glider response: {e}")
             raise
 
 class Chat(BaseChat):
     def __init__(self, client: 'Glider'):
         self.completions = Completions(client)
 
 class Glider(OpenAICompatibleProvider):
     """
     OpenAI-compatible client for Glider.so API.
 
     Usage:
         client = Glider()
         response = client.chat.completions.create(
             model="chat-llama-3-1-70b",
             messages=[{"role": "user", "content": "Hello!"}]
         )
     """
 
     AVAILABLE_MODELS = [
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "chat-deepseek-r1-qwen-32b",
         "chat-qwen-2-5-7b",
         "chat-qwen-qwq-32b",
         "deepseek-ai/DeepSeek-R1",
     ]
 
     # No model mapping needed as we use the model names directly
 
     def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
         """
         Initialize the Glider client.
 
         Args:
             timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent
         """
         self.timeout = timeout
         self.api_endpoint = "https://glider.so/api/chat"
         self.session = requests.Session()
 
         agent = LitAgent()
         fingerprint = agent.generate_fingerprint(browser)
 
         self.headers = {
             "Accept": fingerprint["accept"],
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Accept-Language": fingerprint["accept_language"],
             "Content-Type": "application/json",
             "Cache-Control": "no-cache",
             "Connection": "keep-alive",
             "Origin": "https://glider.so",
             "Pragma": "no-cache",
             "Referer": "https://glider.so/",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
             "Sec-CH-UA-Mobile": "?0",
             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
             "User-Agent": fingerprint["user_agent"],
         }
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
 
     def convert_model_name(self, model: str) -> str:
         """
         Convert model names to ones supported by Glider.
 
         Args:
             model: Model name to convert
 
         Returns:
             Glider model name
         """
         # If the model is already a valid Glider model, return it
         if model in self.AVAILABLE_MODELS:
             return model
 
         # Default to the most capable model
         print(f"Warning: Unknown model '{model}'. Using 'chat-llama-3-1-70b' instead.")
         return "chat-llama-3-1-70b"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
```
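The functional change in this hunk is the new `models` property, which gives the Glider client an OpenAI-style model-listing surface alongside the existing `chat.completions.create` API. Below is a minimal usage sketch, assuming webscout 8.2.8 is installed, glider.so is reachable, and that the response objects expose attribute access per the shapes defined in `webscout/Provider/OPENAI/utils.py`:

```python
# Usage sketch for the 8.2.8 Glider client (assumes `pip install webscout==8.2.8`
# and that glider.so accepts unauthenticated requests).
from webscout.Provider.OPENAI.glider import Glider

client = Glider(timeout=30)

# New in 8.2.8: OpenAI-style model discovery.
print(client.models.list())  # -> Glider.AVAILABLE_MODELS

# Non-streaming completion; the response mirrors OpenAI's ChatCompletion shape.
response = client.chat.completions.create(
    model="chat-llama-3-1-8b",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)

# Streaming: chunks mirror OpenAI's delta format, so content arrives
# incrementally on chunk.choices[0].delta.content.
for chunk in client.chat.completions.create(
    model="chat-llama-3-1-8b",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)
```

Note that `models.list()` returns the hard-coded `AVAILABLE_MODELS` class attribute rather than querying the Glider API, and unknown model names are silently mapped to `chat-llama-3-1-70b` by `convert_model_name`.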