webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: the registry has flagged this version of webscout.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
The hunks below are from webscout/Provider/OPENAI/chatsandbox.py (matching the +11 -6 in the file list above). The first adds the new count_tokens helper to the utility imports:

@@ -10,7 +10,8 @@ from .utils import (
     ChatCompletionMessage,
     ChoiceDelta,
     CompletionUsage,
-    format_prompt
+    format_prompt,
+    count_tokens
 )
 import requests
 
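count_tokens itself is defined in webscout/Provider/OPENAI/utils.py (also updated in this release, +90 -79); only its call sites appear in this diff, where it maps a string to an integer token count. A hypothetical stand-in with that shape, for readers skimming the diff without the package checked out (not the package's actual implementation):

```python
# Hypothetical stand-in for count_tokens, inferred only from the call
# sites in this diff: it takes a string and returns an int token count.
def count_tokens(text: str) -> int:
    try:
        import tiktoken  # optional; gives a realistic BPE token count
        return len(tiktoken.get_encoding("cl100k_base").encode(text))
    except ImportError:
        # Crude fallback: whitespace word count approximates token count.
        return len(text.split())
```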
create() gains per-call timeout and proxies parameters:

@@ -32,6 +33,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """

The session now honors those parameters instead of a hard-coded 30-second timeout, in both the streaming and non-streaming paths:

@@ -69,13 +72,15 @@ class Completions(BaseCompletions):
         }
         session = requests.Session()
         session.headers.update(headers)
+        session.proxies = proxies if proxies is not None else {}
+
         def for_stream():
             try:
                 response = session.post(
                     url,
                     json=payload,
                     stream=True,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 streaming_text = ""

@@ -115,7 +120,7 @@
             response = session.post(
                 url,
                 json=payload,
-                timeout=30
+                timeout=timeout if timeout is not None else 30
             )
             response.raise_for_status()
             text = response.text

Token accounting switches to count_tokens (the removed right-hand sides are truncated in the registry's rendering):

@@ -124,8 +129,8 @@
                 content = data.get("reasoning_content", text)
             except Exception:
                 content = text
-            prompt_tokens =
-            completion_tokens =
+            prompt_tokens = count_tokens(question)
+            completion_tokens = count_tokens(content)
             total_tokens = prompt_tokens + completion_tokens
             usage = CompletionUsage(
                 prompt_tokens=prompt_tokens,

Finally, AVAILABLE_MODELS is expanded (note the duplicate "llama" entry carried over from upstream):

@@ -151,7 +156,7 @@ class Chat(BaseChat):
         self.completions = Completions(client)
 
 class ChatSandbox(OpenAICompatibleProvider):
-    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large", "deepseek-r1", "deepseek-r1-full", "gemini-thinking", "openai-o1-mini", "llama", "mistral", "gemma-3"]
     chat: Chat
     def __init__(self):
         self.chat = Chat(self)
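Taken together, these hunks let callers set a per-request timeout and proxy instead of the previous hard-coded 30-second default. A minimal usage sketch, assuming the class is imported from its module path as laid out above (the exact re-exports in __init__.py may differ):

```python
from webscout.Provider.OPENAI.chatsandbox import ChatSandbox

client = ChatSandbox()
response = client.chat.completions.create(
    model="deepseek-r1",  # one of the newly added models
    messages=[{"role": "user", "content": "Hello!"}],
    timeout=60,  # new: overrides the 30 s default
    proxies={"https": "http://127.0.0.1:8080"},  # new: per-call proxy dict
)
print(response.choices[0].message.content)
```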
The remaining hunk is the new Microsoft Copilot provider, webscout/Provider/OPENAI/copilot.py (+258 -0 in the file list above):

@@ -0,0 +1,258 @@
+import json
+import uuid
+import time
+from typing import List, Dict, Optional, Union, Generator, Any
+from urllib.parse import quote
+from curl_cffi.requests import Session, CurlWsFlag
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- Microsoft Copilot Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Copilot'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the entire conversation using the utility function
+        formatted_prompt = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # Handle image if provided
+        image = kwargs.get("image")
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
+        try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
+            s = self._client.session
+            # Create a new conversation if needed
+            r = s.post(self._client.conversation_url, timeout=timeout_val)
+            if r.status_code != 200:
+                raise RuntimeError(f"Failed to create conversation: {r.text}")
+            conv_id = r.json().get("id")
+
+            # Handle image upload if provided
+            images = []
+            if image:
+                r = s.post(
+                    f"{self._client.url}/c/api/attachments",
+                    headers={"content-type": "image/jpeg"},
+                    data=image,
+                    timeout=timeout_val
+                )
+                if r.status_code != 200:
+                    raise RuntimeError(f"Image upload failed: {r.text}")
+                images.append({"type": "image", "url": r.json().get("url")})
+
+            # Connect to websocket
+            # Note: ws_connect might not use timeout in the same way as POST/GET
+            ws = s.ws_connect(self._client.websocket_url)
+
+            # Use model to set mode ("reasoning" for Think Deeper)
+            mode = "reasoning" if "Think" in model else "chat"
+
+            # Send the message to Copilot
+            ws.send(json.dumps({
+                "event": "send",
+                "conversationId": conv_id,
+                "content": images + [{"type": "text", "text": prompt_text}],
+                "mode": mode
+            }).encode(), CurlWsFlag.TEXT)
+
+            # Track token usage using count_tokens
+            prompt_tokens = count_tokens(prompt_text)
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            started = False
+            while True:
+                try:
+                    msg = json.loads(ws.recv()[0])
+                except Exception:
+                    break
+
+                if msg.get("event") == "appendText":
+                    started = True
+                    content = msg.get("text", "")
+
+                    # Update token counts using count_tokens
+                    content_tokens = count_tokens(content)
+                    completion_tokens += content_tokens
+                    total_tokens = prompt_tokens + completion_tokens
+
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=content,
+                        role="assistant"
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                elif msg.get("event") == "done":
+                    # Final chunk with finish_reason
+                    delta = ChoiceDelta(
+                        content=None,
+                        role=None
+                    )
+
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason="stop"
+                    )
+
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                    break
+                elif msg.get("event") == "error":
+                    raise RuntimeError(f"Copilot error: {msg}")
+
+            ws.close()
+
+            if not started:
+                raise RuntimeError("No response received from Copilot")
+
+        except Exception as e:
+            raise RuntimeError(f"Stream error: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
+    ) -> ChatCompletion:
+        result = ""
+        # Pass timeout and proxies to the underlying _create_stream call
+        for chunk in self._create_stream(request_id, created_time, model, prompt_text, image, timeout=timeout, proxies=proxies):
+            if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
+                result += chunk.choices[0].delta.content
+
+        # Create the message object
+        message = ChatCompletionMessage(
+            role="assistant",
+            content=result
+        )
+
+        # Create the choice object
+        choice = Choice(
+            index=0,
+            message=message,
+            finish_reason="stop"
+        )
+
+        # Estimate token usage using count_tokens
+        prompt_tokens = count_tokens(prompt_text)
+        completion_tokens = count_tokens(result)
+        total_tokens = prompt_tokens + completion_tokens
+
+        # Create usage object
+        usage = CompletionUsage(
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+        )
+
+        # Create the completion object
+        completion = ChatCompletion(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+            usage=usage
+        )
+
+        return completion
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Copilot'):
+        self.completions = Completions(client)
+
+class Copilot(OpenAICompatibleProvider):
+
+    url = "https://copilot.microsoft.com"
+    conversation_url = f"{url}/c/api/conversations"
+    websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
+
+    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
+
+    def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
+        self.timeout = 900
+        self.session = Session(impersonate=browser)
+        self.session.proxies = {}
+
+        # Initialize tools
+        self.available_tools = {}
+        if tools:
+            self.register_tools(tools)
+
+        # Set up the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return self.AVAILABLE_MODELS
+        return _ModelList()