webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
--- a/webscout/Provider/OPENAI/freeaichat.py
+++ b/webscout/Provider/OPENAI/freeaichat.py
@@ -1,352 +1,359 @@
 import time
 import uuid
 import requests
 import json
 from typing import List, Dict, Optional, Union, Generator, Any

 # Import base classes and utility structures
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )

 # Attempt to import LitAgent, fallback if not available
 try:
     from webscout.litagent import LitAgent
 except ImportError:
     pass

 # --- FreeAIChat Client ---

 class Completions(BaseCompletions):
     def __init__(self, client: 'FreeAIChat'):
         self._client = client

     def create(
         self,
         *,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: Optional[int] = 2049,
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Creates a model response for the given chat conversation.
         Mimics openai.chat.completions.create
         """
         payload = {
             "model": model,
             "messages": messages,
             "max_tokens": max_tokens,
             "stream": stream,
         }
         if temperature is not None:
             payload["temperature"] = temperature
         if top_p is not None:
             payload["top_p"] = top_p

         payload.update(kwargs)

         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())

         if stream:
             return self._create_stream(request_id, created_time, model, payload)
         else:
             return self._create_non_stream(request_id, created_time, model, payload)

     def _create_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
                 timeout=self._client.timeout
             )

             # Handle non-200 responses
             if not response.ok:
                 raise IOError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )

             # Track token usage across chunks
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0

             # Estimate prompt tokens based on message length
             for msg in payload.get("messages", []):
                 prompt_tokens += len(msg.get("content", "").split())

             for line in response.iter_lines():
                 if not line:
                     continue

                 line_str = line.decode('utf-8').strip()

                 if line_str.startswith("data: "):
                     json_str = line_str[6:] # Remove "data: " prefix
                     if json_str == "[DONE]":
                         break

                     try:
                         data = json.loads(json_str)
                         choice_data = data.get('choices', [{}])[0]
                         delta_data = choice_data.get('delta', {})
                         finish_reason = choice_data.get('finish_reason')

                         # Update token counts if available
                         usage_data = data.get('usage', {})
                         if usage_data:
                             prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                             completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                             total_tokens = usage_data.get('total_tokens', total_tokens)

                         # Create the delta object
                         delta = ChoiceDelta(
                             content=delta_data.get('content'),
                             role=delta_data.get('role'),
                             tool_calls=delta_data.get('tool_calls')
                         )

                         # Create the choice object
                         choice = Choice(
                             index=choice_data.get('index', 0),
                             delta=delta,
                             finish_reason=finish_reason,
                             logprobs=choice_data.get('logprobs')
                         )

                         # Create the chunk object
                         chunk = ChatCompletionChunk(
                             id=request_id,
                             choices=[choice],
                             created=created_time,
                             model=model,
                             system_fingerprint=data.get('system_fingerprint')
                         )

                         # Return the chunk object
                         yield chunk
                     except json.JSONDecodeError:
                         print(f"Warning: Could not decode JSON line: {json_str}")
                         continue

             # Final chunk with finish_reason="stop"
             delta = ChoiceDelta(
                 content=None,
                 role=None,
                 tool_calls=None
             )

             choice = Choice(
                 index=0,
                 delta=delta,
                 finish_reason="stop",
                 logprobs=None
             )

             chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
                 created=created_time,
                 model=model,
                 system_fingerprint=None
             )

             yield chunk

         except Exception as e:
             print(f"Error during FreeAIChat stream request: {e}")
             raise IOError(f"FreeAIChat request failed: {e}") from e

     def _create_non_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
                 timeout=self._client.timeout
             )

             # Handle non-200 responses
             if not response.ok:
                 raise IOError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )

             # Parse the response
             data = response.json()

             choices_data = data.get('choices', [])
             usage_data = data.get('usage', {})

             choices = []
             for choice_d in choices_data:
                 message_d = choice_d.get('message', {})
                 message = ChatCompletionMessage(
                     role=message_d.get('role', 'assistant'),
                     content=message_d.get('content', '')
                 )
                 choice = Choice(
                     index=choice_d.get('index', 0),
                     message=message,
                     finish_reason=choice_d.get('finish_reason', 'stop')
                 )
                 choices.append(choice)

             usage = CompletionUsage(
                 prompt_tokens=usage_data.get('prompt_tokens', 0),
                 completion_tokens=usage_data.get('completion_tokens', 0),
                 total_tokens=usage_data.get('total_tokens', 0)
             )

             completion = ChatCompletion(
                 id=request_id,
                 choices=choices,
                 created=created_time,
                 model=data.get('model', model),
                 usage=usage,
             )
             return completion

         except Exception as e:
             print(f"Error during FreeAIChat non-stream request: {e}")
             raise IOError(f"FreeAIChat request failed: {e}") from e

 class Chat(BaseChat):
     def __init__(self, client: 'FreeAIChat'):
         self.completions = Completions(client)

 class FreeAIChat(OpenAICompatibleProvider):
     """
     OpenAI-compatible client for FreeAIChat API.

     Usage:
         client = FreeAIChat()
         response = client.chat.completions.create(
             model="GPT 4o",
             messages=[{"role": "user", "content": "Hello!"}]
         )
     """

     AVAILABLE_MODELS = [
         # OpenAI Models
         "GPT 4o",
         "GPT 4.5 Preview",
         "GPT 4o Latest",
         "GPT 4o mini",
         "GPT 4o Search Preview",
         "O1",
         "O1 Mini",
         "O3 Mini",
         "O3 Mini High",
         "O3 Mini Low",

         # Anthropic Models
         "Claude 3.5 haiku",
         "claude 3.5 sonnet",
         "Claude 3.7 Sonnet",
         "Claude 3.7 Sonnet (Thinking)",

         # Deepseek Models
         "Deepseek R1",
         "Deepseek R1 Fast",
         "Deepseek V3",
         "Deepseek v3 0324",

         # Google Models
         "Gemini 1.5 Flash",
         "Gemini 1.5 Pro",
         "Gemini 2.0 Flash",
         "Gemini 2.0 Pro",
         "Gemini 2.5 Pro",

         # Llama Models
         "Llama 3.1 405B",
         "Llama 3.1 70B Fast",
         "Llama 3.3 70B",
         "Llama 3.2 90B Vision",
         "Llama 4 Scout",
         "Llama 4 Maverick",

         # Mistral Models
         "Mistral Large",
         "Mistral Nemo",
         "Mixtral 8x22B",

         # Qwen Models
         "Qwen Max",
         "Qwen Plus",
         "Qwen Turbo",
         "QwQ 32B",
         "QwQ Plus",

         # XAI Models
         "Grok 2",
         "Grok 3",
     ]

     def __init__(
         self,
         timeout: Optional[int] = None,
         browser: str = "chrome"
     ):
         """
         Initialize the FreeAIChat client.

         Args:
             timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent
         """
         self.timeout = timeout
         self.api_endpoint = "https://freeaichatplayground.com/api/v1/chat/completions"
         self.session = requests.Session()

         # Initialize LitAgent for user agent generation
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)

         # Initialize headers
         self.headers = {
             'User-Agent': self.fingerprint["user_agent"],
             'Accept': '*/*',
             'Content-Type': 'application/json',
             'Origin': 'https://freeaichatplayground.com',
             'Referer': 'https://freeaichatplayground.com/',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin'
         }

         self.session.headers.update(self.headers)

         # Initialize the chat interface
         self.chat = Chat(self)

     def convert_model_name(self, model: str) -> str:
         """
         Convert model names to ones supported by FreeAIChat.

         Args:
             model: Model name to convert

         Returns:
             FreeAIChat model name
         """
         # If the model is already a valid FreeAIChat model, return it
         if model in self.AVAILABLE_MODELS:
             return model

         # Default to GPT 4o if model not found
         print(f"Warning: Unknown model '{model}'. Using 'GPT 4o' instead.")
         return "GPT 4o"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
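The only substantive change in this file is the `models` property appended in 8.2.8 (the `+` lines above); the rest of the module is unchanged context. Below is a minimal usage sketch, assuming the import path implied by the file list above and that the `Choice`/`ChatCompletionMessage` helpers expose their fields as attributes (as their keyword-argument construction in the diff suggests); it is not an official example.

```python
# Minimal usage sketch for the models property added in 8.2.8.
# The module path and attribute-style response access are assumptions
# drawn from the diff above, not from official documentation.
from webscout.Provider.OPENAI.freeaichat import FreeAIChat

client = FreeAIChat(timeout=30)

# New in 8.2.8: enumerate the provider's model names via the models property,
# which returns an object whose list() yields AVAILABLE_MODELS.
for name in client.models.list():
    print(name)

# Existing OpenAI-style chat call, per the class docstring.
response = client.chat.completions.create(
    model="GPT 4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```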