webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/{monochat.py → K2Think.py}
RENAMED
@@ -1,22 +1,27 @@
 import time
 import uuid
 import requests
-import json
 import re
+import json
 from typing import List, Dict, Optional, Union, Generator, Any
 
+# Import base classes and utility structures
 from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
 
+# Import LitAgent
 from webscout.litagent import LitAgent
 
-#
+# Import logger
+from webscout.Litlogger import Logger, LogLevel
+
+logger = Logger(name="K2Think", level=LogLevel.INFO)
 
 class Completions(BaseCompletions):
-    def __init__(self, client: '
+    def __init__(self, client: 'K2Think'):
         self._client = client
 
     def create(
@@ -29,77 +34,111 @@ class Completions(BaseCompletions):
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Creates a model response for the given chat conversation.
         Mimics openai.chat.completions.create
         """
-        # Prepare the payload for
+        # Prepare the payload for K2Think API
         payload = {
+            "stream": stream,
+            "model": model,
             "messages": messages,
-            "
+            "params": {}
         }
+
+        # Add optional parameters if provided
         if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
+            payload["params"]["max_tokens"] = max_tokens
+
         if temperature is not None:
-            payload["temperature"] = temperature
+            payload["params"]["temperature"] = temperature
+
         if top_p is not None:
-            payload["top_p"] = top_p
+            payload["params"]["top_p"] = top_p
+
+        # Add any additional parameters
         payload.update(kwargs)
 
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload, timeout
+            return self._create_stream(request_id, created_time, model, payload, timeout)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload, timeout
+            return self._create_non_stream(request_id, created_time, model, payload, timeout)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
-                self._client.
+                self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=timeout or self._client.timeout
-                proxies=proxies or getattr(self._client, "proxies", None)
+                timeout=timeout or self._client.timeout
             )
+
+            # Handle non-200 responses
             if not response.ok:
                 raise IOError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
+            # Use count_tokens for prompt tokens
             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
             completion_tokens = 0
             total_tokens = 0
+            seen_content = set() # Track seen content to avoid duplicates
 
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode('utf-8').strip()
-
-
-
-
+
+                    # Extract content using regex patterns (similar to x0gpt)
+                    extract_regexes = [
+                        r'<answer>([\s\S]*?)<\/answer>',
+                    ]
+
+                    content = ""
+                    for regex in extract_regexes:
+                        match = re.search(regex, decoded_line)
+                        if match:
+                            content = match.group(1)
+                            break
+
+                    if content:
+                        # Format the content
                         content = self._client.format_text(content)
+
+                        # Skip if we've already seen this exact content
+                        if content in seen_content:
+                            continue
+
+                        seen_content.add(content)
+
+                        # Update token counts using count_tokens
                         completion_tokens += count_tokens(content)
                         total_tokens = prompt_tokens + completion_tokens
 
+                        # Create the delta object
                         delta = ChoiceDelta(
                             content=content,
                             role="assistant",
                             tool_calls=None
                         )
+
+                        # Create the choice object
                         choice = Choice(
                             index=0,
                             delta=delta,
                             finish_reason=None,
                             logprobs=None
                         )
+
+                        # Create the chunk object
                         chunk = ChatCompletionChunk(
                             id=request_id,
                             choices=[choice],
@@ -107,12 +146,16 @@ class Completions(BaseCompletions):
                             model=model,
                             system_fingerprint=None
                         )
+
+                        # Set usage directly on the chunk object
                         chunk.usage = {
                             "prompt_tokens": prompt_tokens,
                             "completion_tokens": completion_tokens,
                             "total_tokens": total_tokens,
                             "estimated_cost": None
                         }
+
+                        # Return the chunk object with usage information
                         yield chunk
 
             # Final chunk with finish_reason="stop"
@@ -121,12 +164,14 @@ class Completions(BaseCompletions):
                 role=None,
                 tool_calls=None
             )
+
             choice = Choice(
                 index=0,
                 delta=delta,
                 finish_reason="stop",
                 logprobs=None
             )
+
             chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
@@ -134,63 +179,89 @@ class Completions(BaseCompletions):
                 model=model,
                 system_fingerprint=None
             )
+
+            # Set usage directly on the chunk object
             chunk.usage = {
                 "prompt_tokens": prompt_tokens,
                 "completion_tokens": completion_tokens,
                 "total_tokens": total_tokens,
                 "estimated_cost": None
             }
+
             yield chunk
 
         except Exception as e:
-            print(f"Error during
-            raise IOError(f"
+            print(f"Error during K2Think stream request: {e}")
+            raise IOError(f"K2Think request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
-                self._client.
+                self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=timeout or self._client.timeout
-                proxies=proxies or getattr(self._client, "proxies", None)
+                timeout=timeout or self._client.timeout
             )
+
+            # Handle non-200 responses
             if not response.ok:
                 raise IOError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
+            # Collect the full response
             full_text = ""
+            seen_content_parts = set() # Track seen content parts to avoid duplicates
+
             for line in response.iter_lines(decode_unicode=True):
                 if line:
-
-
-
-
-
+                    # Extract content using regex patterns
+                    extract_regexes = [
+                        r'<answer>([\s\S]*?)<\/answer>',
+                    ]
+
+                    for regex in extract_regexes:
+                        match = re.search(regex, line)
+                        if match:
+                            content = match.group(1)
+                            # Only add if we haven't seen this exact content before
+                            if content not in seen_content_parts:
+                                seen_content_parts.add(content)
+                                full_text += content
+                            break
+
+            # Format the text
             full_text = self._client.format_text(full_text)
 
+            # Use count_tokens for accurate token counts
             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
             completion_tokens = count_tokens(full_text)
             total_tokens = prompt_tokens + completion_tokens
 
+            # Create the message object
             message = ChatCompletionMessage(
                 role="assistant",
                 content=full_text
            )
+
+            # Create the choice object
             choice = Choice(
                 index=0,
                 message=message,
                 finish_reason="stop"
             )
+
+            # Create the usage object
             usage = CompletionUsage(
                 prompt_tokens=prompt_tokens,
                 completion_tokens=completion_tokens,
                 total_tokens=total_tokens
             )
+
+            # Create the completion object
             completion = ChatCompletion(
                 id=request_id,
                 choices=[choice],
@@ -198,83 +269,97 @@ class Completions(BaseCompletions):
                 model=model,
                 usage=usage,
             )
+
             return completion
 
         except Exception as e:
-            print(f"Error during
-            raise IOError(f"
+            print(f"Error during K2Think non-stream request: {e}")
+            raise IOError(f"K2Think request failed: {e}") from e
 
 class Chat(BaseChat):
-    def __init__(self, client: '
+    def __init__(self, client: 'K2Think'):
         self.completions = Completions(client)
 
-class
+class Models:
+    """Models class to mimic OpenAI models.list()"""
+    def __init__(self):
+        self.available_models = [
+            "MBZUAI-IFM/K2-Think",
+        ]
+
+    def list(self):
+        """Return list of available models"""
+        return [
+            {
+                "id": model,
+                "object": "model",
+                "created": 0,
+                "owned_by": "k2think"
+            }
+            for model in self.available_models
+        ]
+
+class K2Think(OpenAICompatibleProvider):
     """
-    OpenAI-compatible client for
+    OpenAI-compatible client for K2Think API.
 
     Usage:
-        client =
+        client = K2Think()
         response = client.chat.completions.create(
-            model="
+            model="MBZUAI-IFM/K2-Think",
             messages=[{"role": "user", "content": "Hello!"}]
         )
     """
 
-    AVAILABLE_MODELS = [
-        "deepseek-r1",
-        "deepseek-v3",
-        "uncensored-r1-32b",
-        "o3-pro",
-        "o4-mini",
-        "o3",
-        "gpt-4.5-preview",
-        "gpt-4.1",
-        "gpt-4.1-mini",
-        "gpt-4.1-nano",
-        "gpt-4o",
-        "gpt-4o-mini",
-        "gpt-4o-search-preview",
-        "gpt-4o-mini-search-preview",
-        "gpt-4-turbo"
-
-
-    ]
+    AVAILABLE_MODELS = ["MBZUAI-IFM/K2-Think"]
 
     def __init__(
         self,
-        browser: str = "chrome"
+        browser: str = "chrome",
+        proxies: Optional[dict] = None
     ):
         """
-        Initialize the
+        Initialize the K2Think client.
 
         Args:
             browser: Browser to emulate in user agent
+            proxies: Optional proxy configuration dictionary
         """
-
-        self.
-        self.
+        super().__init__(proxies=proxies)
+        self.timeout = 30
+        self.base_url = "https://www.k2think.ai/api/guest/chat/completions"
 
+        # Initialize LitAgent for user agent generation
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)
 
         self.headers = {
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "User-Agent": self.fingerprint["user_agent"],
+            "Origin": "https://www.k2think.ai",
+            "Referer": "https://www.k2think.ai/guest",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-Ch-Ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": f'"{self.fingerprint["platform"]}"',
+            "Priority": "u=1, i"
         }
 
         self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
         self.chat = Chat(self)
 
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
-                return
+                return K2Think.AVAILABLE_MODELS
         return _ModelList()
 
     def format_text(self, text: str) -> str:
@@ -287,43 +372,61 @@ class MonoChat(OpenAICompatibleProvider):
         Returns:
             Formatted text
         """
+        # Use a more comprehensive approach to handle all escape sequences
         try:
+            # First handle double backslashes to avoid issues
            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
            try:
+                # Add quotes to make it a valid JSON string
                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
                decoded = json.loads(json_str)
                return decoded
            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
                return text
        except Exception as e:
+            # If any error occurs, return the original text
            print(f"Warning: Error formatting text: {e}")
            return text
 
     def convert_model_name(self, model: str) -> str:
         """
-        Convert model names to ones supported by
+        Convert model names to ones supported by K2Think.
 
         Args:
             model: Model name to convert
 
         Returns:
-
+            K2Think model name
         """
+        # K2Think doesn't actually use model names, but we'll keep this for compatibility
         return model
 
+# Convenience function for backward compatibility
+def K2ThinkClient(**kwargs):
+    """Create a new K2Think client instance"""
+    return K2Think(**kwargs)
+
 if __name__ == "__main__":
-
+    from rich import print
+    client = K2Think()
     response = client.chat.completions.create(
-        model="
-        messages=[{"role": "user", "content": "
-        max_tokens=1000,
+        model="MBZUAI-IFM/K2-Think",
+        messages=[{"role": "user", "content": "Hello!"}],
         stream=True
     )
+
     for chunk in response:
-        if chunk.choices
-        print(chunk.choices[0].delta.content, end=
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end='', flush=True)
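The renamed provider keeps the OpenAI-compatible surface, so usage mirrors the docstring and __main__ block above. A minimal sketch (assumes webscout 8.3.7 is installed; the import path follows the file layout shown in this diff):

from webscout.Provider.OPENAI.K2Think import K2Think

client = K2Think()
response = client.chat.completions.create(
    model="MBZUAI-IFM/K2-Think",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
)
for chunk in response:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)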
webscout/Provider/OPENAI/NEMOTRON.py
CHANGED
@@ -120,12 +120,9 @@ class NEMOTRON(OpenAICompatibleProvider):
     ]
 
     API_BASE_URL = "https://nemotron.one/api/chat"
-    def __init__(
-
-    ):
-        self.session = requests.Session()
+    def __init__(self, proxies: Optional[dict] = None):
+        super().__init__(proxies=proxies)
         self.timeout = 30
-        self.session.proxies = {}
         agent = LitAgent()
         user_agent = agent.random()
         self.base_headers = {
@@ -241,4 +238,4 @@ class NEMOTRON(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
webscout/Provider/OPENAI/PI.py
CHANGED
@@ -289,12 +289,10 @@ class PiAI(OpenAICompatibleProvider):
             proxies: Proxy configuration
             **kwargs: Additional arguments
         """
+        super().__init__(proxies=proxies)
         self.timeout = timeout
         self.conversation_id = None
 
-        # Initialize curl_cffi Session
-        self.session = Session()
-
         # Setup URLs
         self.primary_url = 'https://pi.ai/api/chat'
         self.fallback_url = 'https://pi.ai/api/v2/chat'
@@ -320,6 +318,9 @@ class PiAI(OpenAICompatibleProvider):
             '__cf_bm': uuid4().hex
         }
 
+        # Replace the base session with curl_cffi Session
+        self.session = Session()
+
         # Configure session
         self.session.headers.update(self.headers)
         if proxies:
@@ -424,4 +425,4 @@ if __name__ == "__main__":
     )
 
     print(response.choices[0].message.content)
-    print(f"Usage: {response.usage}")
+    print(f"Usage: {response.usage}")
webscout/Provider/OPENAI/Qwen3.py
CHANGED
@@ -251,10 +251,9 @@ class Qwen3(OpenAICompatibleProvider):
         "qwen-3-0.6b": "qwen3-0.6b"
     }
 
-    def __init__(self):
+    def __init__(self, proxies: Optional[Dict[str, str]] = None):
+        super().__init__(proxies=proxies)
         self.timeout = 30
-        self.session = requests.Session()
-        self.session.proxies = {}
         self.headers = {
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
             'Accept': '*/*',
webscout/Provider/OPENAI/TogetherAI.py
CHANGED
@@ -267,11 +267,11 @@ class TogetherAI(OpenAICompatibleProvider):
         "zai-org/GLM-4.5-Air-FP8"
     ]
 
-    def __init__(self, browser: str = "chrome"):
+    def __init__(self, browser: str = "chrome", proxies: Optional[Dict[str, str]] = None):
+        super().__init__(proxies=proxies)
         self.timeout = 60
         self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
         self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
-        self.session = requests.Session()
         self.headers = LitAgent().generate_fingerprint(browser=browser)
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
webscout/Provider/OPENAI/TwoAI.py
CHANGED
@@ -394,13 +394,12 @@ class TwoAI(OpenAICompatibleProvider):
             raise RuntimeError("Failed to get API key from confirmation email")
         return api_key
 
-    def __init__(self, browser: str = "chrome"):
+    def __init__(self, browser: str = "chrome", proxies: Optional[Dict[str, str]] = None):
+        super().__init__(proxies=proxies)
         api_key = self.get_cached_api_key()
         self.timeout = 30
         self.base_url = "https://api.two.ai/v2/chat/completions"
         self.api_key = api_key
-        self.session = Session()
-        self.session.proxies = {}
 
         headers: Dict[str, str] = {
             "Content-Type": "application/json",
@@ -464,4 +463,4 @@ if __name__ == "__main__":
         stream=True
     )
     for chunk in resp:
-        print(chunk, end="")
+        print(chunk, end="")
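PiAI, NEMOTRON, Qwen3, TogetherAI, and TwoAI all follow the same pattern in this release: the constructor gains a proxies parameter and delegates session setup to OpenAICompatibleProvider.__init__ instead of building its own requests or curl_cffi session. A sketch of the resulting call site (this assumes the base class, whose base.py diff is not shown here, attaches the proxy mapping to self.session; the proxy URL is a placeholder):

from webscout.Provider.OPENAI.TogetherAI import TogetherAI

proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}  # placeholder values
client = TogetherAI(browser="chrome", proxies=proxies)  # session now comes from the base class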
webscout/Provider/OPENAI/__init__.py
CHANGED
@@ -1,59 +1,18 @@
 # This file marks the directory as a Python package.
-
-
-
-from
-
-
-
-
-
-
-
-
-
-
-
-
-
-from .opkfc import *
-from .chatgpt import *
-from .textpollinations import *
-from .typefully import * # Add TypefullyAI
-from .e2b import *
-from .multichat import * # Add MultiChatAI
-from .ai4chat import * # Add AI4Chat
-from .mcpcore import *
-from .flowith import *
-from .chatsandbox import *
-from .flowith import *
-from .Cloudflare import *
-from .NEMOTRON import *
-from .BLACKBOXAI import *
-from .copilot import * # Add Microsoft Copilot
-from .TwoAI import *
-from .oivscode import * # Add OnRender provider
-from .Qwen3 import *
-from .FalconH1 import *
-from .PI import * # Add PI.ai provider
-from .TogetherAI import * # Add TogetherAI provider
-from .xenai import * # Add XenAI provider
-from .GeminiProxy import * # Add GeminiProxy provider
-from .friendli import *
-from .monochat import *
-from .MiniMax import * # Add MiniMaxAI provider
-from .qodo import * # Add QodoAI provider
-from .kimi import * # Add Kimi provider
-from .gptoss import * # Add GPT-OSS provider
-from .refact import * # Add Refact provider
-# Export auto-proxy functionality
-from .autoproxy import (
-    get_auto_proxy,
-    get_proxy_dict,
-    get_working_proxy,
-    test_proxy,
-    get_proxy_stats,
-    refresh_proxy_cache,
-    set_proxy_cache_duration,
-    ProxyAutoMeta
-)
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass # Skip files that can't be imported