webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +32 -14
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +153 -35
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +171 -81
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
- webscout/Provider/OPENAI/Cloudflare.py +7 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -5
- webscout/Provider/OPENAI/NEMOTRON.py +8 -20
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +5 -1
- webscout/Provider/OPENAI/ai4chat.py +40 -40
- webscout/Provider/OPENAI/api.py +808 -649
- webscout/Provider/OPENAI/c4ai.py +3 -3
- webscout/Provider/OPENAI/chatgpt.py +555 -555
- webscout/Provider/OPENAI/chatgptclone.py +493 -487
- webscout/Provider/OPENAI/chatsandbox.py +4 -3
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +5 -2
- webscout/Provider/OPENAI/e2b.py +63 -5
- webscout/Provider/OPENAI/exaai.py +416 -410
- webscout/Provider/OPENAI/exachat.py +444 -443
- webscout/Provider/OPENAI/freeaichat.py +2 -2
- webscout/Provider/OPENAI/glider.py +5 -2
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +308 -307
- webscout/Provider/OPENAI/mcpcore.py +8 -2
- webscout/Provider/OPENAI/multichat.py +4 -4
- webscout/Provider/OPENAI/netwrck.py +6 -5
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +15 -9
- webscout/Provider/OPENAI/sonus.py +304 -303
- webscout/Provider/OPENAI/standardinput.py +433 -433
- webscout/Provider/OPENAI/textpollinations.py +4 -4
- webscout/Provider/OPENAI/toolbaz.py +413 -413
- webscout/Provider/OPENAI/typefully.py +3 -3
- webscout/Provider/OPENAI/typegpt.py +11 -5
- webscout/Provider/OPENAI/uncovrAI.py +463 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +431 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +3 -3
- webscout/Provider/OPENAI/x0gpt.py +365 -378
- webscout/Provider/OPENAI/yep.py +39 -13
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -0
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -10,7 +10,8 @@ from .utils import (
     ChatCompletionMessage,
     ChoiceDelta,
     CompletionUsage,
-    format_prompt
+    format_prompt,
+    count_tokens
 )
 import requests

@@ -124,8 +125,8 @@ class Completions(BaseCompletions):
                 content = data.get("reasoning_content", text)
             except Exception:
                 content = text
-            prompt_tokens =
-            completion_tokens =
+            prompt_tokens = count_tokens(question)
+            completion_tokens = count_tokens(content)
             total_tokens = prompt_tokens + completion_tokens
             usage = CompletionUsage(
                 prompt_tokens=prompt_tokens,
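The two hunks above import count_tokens from .utils and use it to fill in token counts that were previously left unset. This diff does not show the helper's implementation; the call sites only require that it accept a string or a list of strings and return an int. A minimal sketch along those lines, assuming a tiktoken-backed counter with a whitespace fallback (illustrative only, not webscout's actual code):

# Hypothetical sketch of a count_tokens helper compatible with the call
# sites above; webscout's real implementation in .utils may differ.
from typing import List, Union

try:
    import tiktoken  # optional dependency; fall back to word count if absent
    _ENCODER = tiktoken.get_encoding("cl100k_base")
except ImportError:
    _ENCODER = None

def count_tokens(text: Union[str, List[str]]) -> int:
    """Count tokens in a string, or total tokens across a list of strings."""
    if isinstance(text, list):
        return sum(count_tokens(t) for t in text)
    if _ENCODER is not None:
        return len(_ENCODER.encode(text))
    # Crude fallback: whitespace-split word count approximates token count.
    return len(text.split())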
webscout/Provider/OPENAI/copilot.py
ADDED
@@ -0,0 +1,242 @@
+import json
+import uuid
+import time
+from typing import List, Dict, Optional, Union, Generator, Any
+from urllib.parse import quote
+from curl_cffi.requests import Session, CurlWsFlag
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- Microsoft Copilot Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Copilot'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the entire conversation using the utility function
+        formatted_prompt = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # Handle image if provided
+        image = kwargs.get("image")
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, formatted_prompt, image)
+        else:
+            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            s = self._client.session
+            # Create a new conversation if needed
+            r = s.post(self._client.conversation_url)
+            if r.status_code != 200:
+                raise RuntimeError(f"Failed to create conversation: {r.text}")
+            conv_id = r.json().get("id")
+
+            # Handle image upload if provided
+            images = []
+            if image:
+                r = s.post(
+                    f"{self._client.url}/c/api/attachments",
+                    headers={"content-type": "image/jpeg"},
+                    data=image
+                )
+                if r.status_code != 200:
+                    raise RuntimeError(f"Image upload failed: {r.text}")
+                images.append({"type": "image", "url": r.json().get("url")})
+
+            # Connect to websocket
+            ws = s.ws_connect(self._client.websocket_url)
+
+            # Use model to set mode ("reasoning" for Think Deeper)
+            mode = "reasoning" if "Think" in model else "chat"
+
+            # Send the message to Copilot
+            ws.send(json.dumps({
+                "event": "send",
+                "conversationId": conv_id,
+                "content": images + [{"type": "text", "text": prompt_text}],
+                "mode": mode
+            }).encode(), CurlWsFlag.TEXT)
+
+            # Track token usage using count_tokens
+            prompt_tokens = count_tokens(prompt_text)
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            started = False
+            while True:
+                try:
+                    msg = json.loads(ws.recv()[0])
+                except Exception:
+                    break
+
+                if msg.get("event") == "appendText":
+                    started = True
+                    content = msg.get("text", "")
+
+                    # Update token counts using count_tokens
+                    content_tokens = count_tokens(content)
+                    completion_tokens += content_tokens
+                    total_tokens = prompt_tokens + completion_tokens
+
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=content,
+                        role="assistant"
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                elif msg.get("event") == "done":
+                    # Final chunk with finish_reason
+                    delta = ChoiceDelta(
+                        content=None,
+                        role=None
+                    )
+
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason="stop"
+                    )
+
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                    break
+                elif msg.get("event") == "error":
+                    raise RuntimeError(f"Copilot error: {msg}")
+
+            ws.close()
+
+            if not started:
+                raise RuntimeError("No response received from Copilot")
+
+        except Exception as e:
+            raise RuntimeError(f"Stream error: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
+    ) -> ChatCompletion:
+        result = ""
+        for chunk in self._create_stream(request_id, created_time, model, prompt_text, image):
+            if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
+                result += chunk.choices[0].delta.content
+
+        # Create the message object
+        message = ChatCompletionMessage(
+            role="assistant",
+            content=result
+        )
+
+        # Create the choice object
+        choice = Choice(
+            index=0,
+            message=message,
+            finish_reason="stop"
+        )
+
+        # Estimate token usage using count_tokens
+        prompt_tokens = count_tokens(prompt_text)
+        completion_tokens = count_tokens(result)
+        total_tokens = prompt_tokens + completion_tokens
+
+        # Create usage object
+        usage = CompletionUsage(
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+        )
+
+        # Create the completion object
+        completion = ChatCompletion(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+            usage=usage
+        )
+
+        return completion
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Copilot'):
+        self.completions = Completions(client)
+
+class Copilot(OpenAICompatibleProvider):
+
+    url = "https://copilot.microsoft.com"
+    conversation_url = f"{url}/c/api/conversations"
+    websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
+
+    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
+
+    def __init__(self, timeout: int = 900, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
+        self.timeout = timeout
+        self.session = Session(timeout=timeout, impersonate=browser)
+
+        # Initialize tools
+        self.available_tools = {}
+        if tools:
+            self.register_tools(tools)
+
+        # Set up the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return self.AVAILABLE_MODELS
+        return _ModelList()
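The new copilot.py follows the repository's OpenAI-compatible provider pattern, so it should be callable like any other provider in webscout/Provider/OPENAI. A usage sketch, assuming the class is re-exported from the package (the +5 -1 change to __init__.py suggests new providers were registered there, but the exact export is not shown in this diff):

# Usage sketch for the new Copilot provider; the import path is an assumption.
from webscout.Provider.OPENAI import Copilot

client = Copilot(timeout=900, browser="chrome")

# Non-streaming call, mirroring openai.chat.completions.create
response = client.chat.completions.create(
    model="Copilot",
    messages=[{"role": "user", "content": "Summarize the CAP theorem."}],
)
print(response.choices[0].message.content)

# Streaming call; "Think Deeper" flips the websocket mode to "reasoning"
for chunk in client.chat.completions.create(
    model="Think Deeper",
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    stream=True,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")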
@@ -125,8 +125,11 @@ class Completions(BaseCompletions):
                 system_fingerprint=data.get('system_fingerprint')
             )

-            # Convert to dict
-
+            # Convert chunk to dict using Pydantic's API
+            if hasattr(chunk, "model_dump"):
+                chunk_dict = chunk.model_dump(exclude_none=True)
+            else:
+                chunk_dict = chunk.dict(exclude_none=True)

             # Add usage information to match OpenAI format
             # Even if we don't have real token counts, include estimated usage
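The hasattr guard above is the usual way to support Pydantic v1 and v2 side by side: v2 renamed .dict() to .model_dump() and deprecates the old name. The pydantic_imports.py module added in this release points at the same concern. A generic form of the pattern (the helper name is illustrative, not part of webscout's API):

# Generic Pydantic v1/v2 serialization shim; same idea as the hunk above.
# "to_dict" is an illustrative name, not webscout API.
def to_dict(model, **kwargs):
    if hasattr(model, "model_dump"):   # Pydantic v2
        return model.model_dump(**kwargs)
    return model.dict(**kwargs)        # Pydantic v1

chunk_dict = to_dict(chunk, exclude_none=True)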
webscout/Provider/OPENAI/e2b.py
CHANGED
@@ -11,7 +11,7 @@ import requests # For bypassing Cloudflare protection
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage,
+    ChatCompletionMessage, CompletionUsage, count_tokens
 )

 # Attempt to import LitAgent, fallback if not available
@@ -899,7 +899,65 @@ MODEL_PROMPT = {
                 }
             }
         }
-    }
+    },
+    "claude-opus-4-20250514": {
+        "apiUrl": "https://fragments.e2b.dev/api/chat",
+        "id": "claude-opus-4-20250514",
+        "name": "Claude Opus 4 (2025-05-14)",
+        "Knowledge": "2025-05",
+        "provider": "Anthropic",
+        "providerId": "anthropic",
+        "multiModal": True,
+        "templates": {
+            "system": {
+                "intro": "You are Claude Opus 4, a large language model trained by Anthropic",
+                "principles": ["honesty", "ethics", "diligence"],
+                "latex": {
+                    "inline": "$x^2$",
+                    "block": "$e=mc^2$"
+                }
+            }
+        },
+        "requestConfig": {
+            "template": {
+                "txt": {
+                    "name": "chat with users and start role-playing, Above of all: Follow the latest news from users",
+                    "lib": [""],
+                    "file": "pages/ChatWithUsers.txt",
+                    "port": 3000
+                }
+            }
+        }
+    },
+    "claude-sonnet-4": {
+        "apiUrl": "https://fragments.e2b.dev/api/chat",
+        "id": "claude-sonnet-4",
+        "name": "Claude Sonnet 4",
+        "Knowledge": "2025-05",
+        "provider": "Anthropic",
+        "providerId": "anthropic",
+        "multiModal": True,
+        "templates": {
+            "system": {
+                "intro": "You are Claude Sonnet 4, a large language model trained by Anthropic",
+                "principles": ["honesty", "ethics", "diligence"],
+                "latex": {
+                    "inline": "$x^2$",
+                    "block": "$e=mc^2$"
+                }
+            }
+        },
+        "requestConfig": {
+            "template": {
+                "txt": {
+                    "name": "chat with users and start role-playing, Above of all: Follow the latest news from users",
+                    "lib": [""],
+                    "file": "pages/ChatWithUsers.txt",
+                    "port": 3000
+                }
+            }
+        }
+    },
 }

 class Completions(BaseCompletions):
@@ -1039,9 +1097,9 @@ class Completions(BaseCompletions):
         model_config = self._client.MODEL_PROMPT[model_id]
         full_response_text = self._send_request(request_body, model_config)

-        # Estimate token counts
-        prompt_tokens =
-        completion_tokens =
+        # Estimate token counts using count_tokens
+        prompt_tokens = count_tokens([msg.get("content", [{"text": ""}])[0].get("text", "") for msg in request_body.get("messages", [])])
+        completion_tokens = count_tokens(full_response_text)
         total_tokens = prompt_tokens + completion_tokens

         message = ChatCompletionMessage(role="assistant", content=full_response_text)