webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Note: this release of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0

webscout/Provider/OPENAI/TogetherAI.py (new file)

```diff
@@ -0,0 +1,355 @@
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+import requests
+import uuid
+import time
+import json
+from webscout.litagent import LitAgent
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'TogetherAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        stop: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Get API key if not already set
+        if not self._client.headers.get("Authorization"):
+            api_key = self._client.get_activation_key()
+            self._client.headers["Authorization"] = f"Bearer {api_key}"
+            self._client.session.headers.update(self._client.headers)
+
+        model_name = self._client.convert_model_name(model)
+        payload = {
+            "model": model_name,
+            "messages": messages,
+            "stream": stream,
+        }
+        if max_tokens is not None:
+            payload["max_tokens"] = max_tokens
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if stop is not None:
+            payload["stop"] = stop
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8')
+                    if line.startswith('data: '):
+                        line = line[6:]
+                    if line.strip() == '[DONE]':
+                        break
+                    try:
+                        chunk_data = json.loads(line)
+                        if 'choices' in chunk_data and chunk_data['choices']:
+                            delta = chunk_data['choices'][0].get('delta', {})
+                            content = delta.get('content')
+                            if content:
+                                completion_tokens += count_tokens(content)
+                                total_tokens = prompt_tokens + completion_tokens
+                                choice_delta = ChoiceDelta(
+                                    content=content,
+                                    role=delta.get('role', 'assistant'),
+                                    tool_calls=delta.get('tool_calls')
+                                )
+                                choice = Choice(
+                                    index=0,
+                                    delta=choice_delta,
+                                    finish_reason=None,
+                                    logprobs=None
+                                )
+                                chunk = ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[choice],
+                                    created=created_time,
+                                    model=model
+                                )
+                                chunk.usage = {
+                                    "prompt_tokens": prompt_tokens,
+                                    "completion_tokens": completion_tokens,
+                                    "total_tokens": total_tokens,
+                                    "estimated_cost": None
+                                }
+                                yield chunk
+                    except Exception:
+                        continue
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            raise IOError(f"TogetherAI stream request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            payload_copy = payload.copy()
+            payload_copy["stream"] = False
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload_copy,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+
+            full_text = ""
+            finish_reason = "stop"
+            if 'choices' in data and data['choices']:
+                full_text = data['choices'][0]['message']['content']
+                finish_reason = data['choices'][0].get('finish_reason', 'stop')
+
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason=finish_reason
+            )
+
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            raise IOError(f"TogetherAI non-stream request failed: {e}") from e
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'TogetherAI'):
+        self.completions = Completions(client)
+
+
+class TogetherAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for TogetherAI API.
+    """
+class TogetherAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for TogetherAI API.
+    """
+    AVAILABLE_MODELS = [
+        "Gryphe/MythoMax-L2-13b",
+        "Gryphe/MythoMax-L2-13b-Lite",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "Qwen/QwQ-32B",
+        "Qwen/Qwen2-72B-Instruct",
+        "Qwen/Qwen2-VL-72B-Instruct",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-fp8",
+        "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
+        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
+        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+        "arcee-ai/arcee-blitz",
+        "arcee-ai/caller",
+        "arcee-ai/coder-large",
+        "arcee-ai/maestro-reasoning",
+        "arcee-ai/virtuoso-large",
+        "arcee-ai/virtuoso-medium-v2",
+        "arcee_ai/arcee-spotlight",
+        "blackbox/meta-llama-3-1-8b",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+        "deepseek-ai/DeepSeek-V3",
+        "deepseek-ai/DeepSeek-V3-p-dp",
+        "google/gemma-2-27b-it",
+        "google/gemma-2b-it",
+        "lgai/exaone-3-5-32b-instruct",
+        "lgai/exaone-deep-32b",
+        "marin-community/marin-8b-instruct",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Vision-Free",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "perplexity-ai/r1-1776",
+        "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
+        "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
+        "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
+        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+        "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
+        "togethercomputer/MoA-1",
+        "togethercomputer/MoA-1-Turbo",
+        "togethercomputer/Refuel-Llm-V2",
+        "togethercomputer/Refuel-Llm-V2-Small",
+    ]
+
+    def __init__(self, browser: str = "chrome"):
+        self.timeout = 60
+        self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
+        self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
+        self.session = requests.Session()
+        self.headers = LitAgent().generate_fingerprint(browser=browser)
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+        self._api_key_cache = None
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return TogetherAI.AVAILABLE_MODELS
+        return _ModelList()
+
+    def get_activation_key(self) -> str:
+        """Get API key from activation endpoint"""
+        if self._api_key_cache:
+            return self._api_key_cache
+
+        try:
+            response = requests.get(
+                self.activation_endpoint,
+                headers={"Accept": "application/json"},
+                timeout=30
+            )
+            response.raise_for_status()
+            activation_data = response.json()
+            self._api_key_cache = activation_data["openAIParams"]["apiKey"]
+            return self._api_key_cache
+        except Exception as e:
+            raise Exception(f"Failed to get activation key: {e}")
+
+    def convert_model_name(self, model: str) -> str:
+        """Convert model name - returns model if valid, otherwise default"""
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Default to first available model if not found
+        return self.AVAILABLE_MODELS[0]
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    client = TogetherAI()
+    messages = [
+        {"role": "user", "content": "Hello, how are you?"},
+        {"role": "assistant", "content": "I'm fine, thank you! How can I help you today?"},
+        {"role": "user", "content": "Tell me a short joke."}
+    ]
+
+    # Non-streaming example
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=messages,
+        max_tokens=50,
+        stream=False
+    )
+    print("Non-streaming response:")
+    print(response)
+
+    # Streaming example
+    print("\nStreaming response:")
+    stream = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=messages,
+        max_tokens=50,
+        stream=True
+    )
+
+    for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="")
+    print()
```
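
A note on the new provider: `TogetherAI` takes no user-supplied API key. On the first `create()` call it fetches a bearer token from a third-party activation endpoint and caches it for the life of the client. A minimal standalone sketch of that flow, using the same endpoint and JSON path as `get_activation_key()` above (the function name here is illustrative):

```python
import requests

def fetch_together_key() -> str:
    """Replicate TogetherAI.get_activation_key(): pull a bearer token
    from the activation endpoint this release hardcodes."""
    resp = requests.get(
        "https://www.codegeneration.ai/activate-v2",
        headers={"Accept": "application/json"},
        timeout=30,
    )
    resp.raise_for_status()
    # The key is read from openAIParams.apiKey in the JSON body.
    return resp.json()["openAIParams"]["apiKey"]
```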
webscout/Provider/OPENAI/TwoAI.py

```diff
@@ -39,6 +39,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """Create a chat completion using TwoAI."""
@@ -59,19 +61,26 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -129,18 +138,25 @@ class Completions(BaseCompletions):
             yield chunk
         except Exception as e:
             raise IOError(f"TwoAI request failed: {e}") from e
-
-
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
             data = response.json()
@@ -179,8 +195,8 @@ class Completions(BaseCompletions):
             return completion
         except Exception as e:
             raise IOError(f"TwoAI request failed: {e}") from e
-
-
+        finally:
+            self._client.session.proxies = original_proxies
 
 
 class Chat(BaseChat):
@@ -285,12 +301,13 @@ class TwoAI(OpenAICompatibleProvider):
             raise RuntimeError("Failed to get API key from confirmation email")
         return api_key
 
-    def __init__(self,
+    def __init__(self, browser: str = "chrome"):
         api_key = self.generate_api_key()
-        self.timeout =
+        self.timeout = 30
         self.base_url = "https://api.two.ai/v2/chat/completions"
         self.api_key = api_key
         self.session = Session()
+        self.session.proxies = {}
 
         headers: Dict[str, str] = {
             "Content-Type": "application/json",
```
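
The recurring change in these hunks, and across most of the OPENAI provider files in this release, is threading per-request `timeout` and `proxies` arguments down to the HTTP call: the session-level proxies are swapped in before the request and restored in a `finally` block. A minimal sketch of that pattern in isolation (function name and default timeout are illustrative, not part of the package):

```python
from typing import Any, Dict, Optional

import requests

def post_with_overrides(
    session: requests.Session,
    url: str,
    payload: Dict[str, Any],
    timeout: Optional[int] = None,
    proxies: Optional[Dict[str, str]] = None,
    default_timeout: int = 30,
) -> requests.Response:
    """Temporarily apply per-request proxies to a shared session."""
    original_proxies = session.proxies.copy()
    session.proxies = proxies if proxies is not None else {}
    try:
        return session.post(
            url,
            json=payload,
            timeout=timeout if timeout is not None else default_timeout,
        )
    finally:
        # Restored even if the request raises, mirroring the finally blocks above.
        session.proxies = original_proxies
```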
webscout/Provider/OPENAI/__init__.py

```diff
@@ -37,4 +37,7 @@ from .BLACKBOXAI import *
 from .copilot import * # Add Microsoft Copilot
 from .TwoAI import *
 from .oivscode import * # Add OnRender provider
-from .Qwen3 import *
+from .Qwen3 import *
+from .FalconH1 import *
+from .PI import * # Add PI.ai provider
+from .TogetherAI import * # Add TogetherAI provider
```
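
With these re-exports, the new providers are importable from the package root. A quick sketch (this assumes, as holds for `TogetherAI` above, that each new module exports a class named after itself):

```python
from webscout.Provider.OPENAI import TogetherAI

client = TogetherAI()
print(client.models.list()[:3])  # first few entries of AVAILABLE_MODELS
```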
webscout/Provider/OPENAI/ai4chat.py

```diff
@@ -26,6 +26,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -48,18 +50,19 @@ class Completions(BaseCompletions):
 
         # AI4Chat doesn't support streaming, so we'll simulate it if requested
         if stream:
-            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
 
     def _create_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Simulate streaming by breaking up the full response into fixed-size character chunks."""
         try:
             # Get the full response first
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Track token usage
             prompt_tokens = count_tokens(conversation_prompt)
@@ -133,12 +136,13 @@ class Completions(BaseCompletions):
 
     def _create_non_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
         """Get a complete response from AI4Chat."""
         try:
             # Get the full response
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Estimate token counts
             prompt_tokens = count_tokens(conversation_prompt)
@@ -183,22 +187,31 @@ class Completions(BaseCompletions):
             print(f"Unexpected error during AI4Chat non-stream request: {e}")
             raise IOError(f"AI4Chat request failed: {e}") from e
 
-    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str,
+                              timeout: Optional[int] = None, proxies: Optional[dict] = None) -> str:
         """Make the actual API request to AI4Chat."""
-
-
-
-
+        timeout_val = timeout if timeout is not None else self._client.timeout
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+
+        try:
+            # URL encode parameters
+            encoded_text = urllib.parse.quote(prompt)
+            encoded_country = urllib.parse.quote(country)
+            encoded_user_id = urllib.parse.quote(user_id)
 
-
-
+            # Construct the API URL
+            url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
 
-
-
-            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            # Make the request
+            response = self._client.session.get(url, headers=self._client.headers, timeout=timeout_val)
             response.raise_for_status()
         except RequestsError as e:
             raise IOError(f"Failed to generate response: {e}")
+        finally:
+            if proxies is not None:
+                self._client.session.proxies = original_proxies
 
         # Process the response text
         response_text = response.text
@@ -235,8 +248,6 @@ class AI4Chat(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: int = 30,
-        proxies: dict = {},
         system_prompt: str = "You are a helpful and informative AI assistant.",
         country: str = "Asia",
         user_id: str = "usersmjb2oaz7y"
@@ -245,14 +256,11 @@ class AI4Chat(OpenAICompatibleProvider):
         Initialize the AI4Chat client.
 
         Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
             system_prompt: System prompt to guide the AI's behavior
             country: Country parameter for API
             user_id: User ID for API
         """
-        self.timeout =
-        self.proxies = proxies
+        self.timeout = 30
         self.system_prompt = system_prompt
         self.country = country
         self.user_id = user_id
@@ -261,7 +269,9 @@ class AI4Chat(OpenAICompatibleProvider):
         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
 
         # Initialize session
-        self.session = Session(
+        self.session = Session()
+        self.session.proxies = {}
+        # self.session.timeout = self.timeout # Timeout is per-request for curl_cffi
 
         # Set headers
         self.headers = {
```