webscout 2025.10.17-py3-none-any.whl → 2025.10.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/Gradient.py +231 -0
- webscout/Provider/TogetherAI.py +139 -199
- webscout/version.py +1 -1
- webscout/version.py.bak +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/METADATA +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/RECORD +10 -9
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/WHEEL +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/entry_points.txt +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/top_level.txt +0 -0
webscout/Provider/Gradient.py ADDED

@@ -0,0 +1,231 @@
+"""
+Gradient Network Chat API Provider
+Reverse engineered from https://chat.gradient.network/
+"""
+
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+from typing import Optional, Generator, Dict, Any, Union
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+
+class Gradient(Provider):
+    """
+    Provider for Gradient Network chat API
+    Supports real-time streaming responses from distributed GPU clusters
+    """
+
+    required_auth = False
+    AVAILABLE_MODELS = [
+        "GPT-OSS-120B",
+        "Qwen3-235B",
+    ]
+
+    def __init__(
+        self,
+        model: str = "GPT-OSS-120B",
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+        cluster_mode: str = "nvidia",
+        enable_thinking: bool = True,
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.model = model
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.proxies = proxies
+        self.system_prompt = system_prompt
+        self.cluster_mode = cluster_mode
+        self.enable_thinking = enable_thinking
+
+        self.session = Session()
+        self.session.proxies = proxies
+
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint("chrome")
+
+        self.headers = {
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Accept": "*/*",
+            "Accept-Language": self.fingerprint.get("accept_language", ""),
+            "Content-Type": "application/json",
+            "Origin": "https://chat.gradient.network",
+            "Referer": "https://chat.gradient.network/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+        }
+        self.session.headers.update(self.headers)
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt},
+        ]
+
+        payload = {
+            "model": self.model,
+            "clusterMode": self.cluster_mode,
+            "messages": messages,
+            "enableThinking": self.enable_thinking,
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self.session.post(
+                    "https://chat.gradient.network/api/generate",
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110",
+                )
+                response.raise_for_status()
+
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=True,
+                    skip_markers=[],
+                    content_extractor=self._Gradient_extractor,
+                    yield_raw_on_error=False,
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                full_response = ""
+                for chunk in for_stream():
+                    full_response += self.get_message(chunk)
+
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return self.last_response if not raw else full_response
+
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+    @staticmethod
+    def _Gradient_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        if isinstance(chunk, dict):
+            chunk_type = chunk.get("type")
+            if chunk_type == "reply":
+                return chunk.get("data", {}).get("reasoningContent", "")
+        return None
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Gradient.AVAILABLE_MODELS:
+        try:
+            test_ai = Gradient(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "v"
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "x"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'x':<10} {str(e)}")
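For orientation, this is a standard webscout Provider: construct it, then call chat() either for a full string or for a generator of text chunks. A minimal usage sketch, assuming the release installs the module at webscout/Provider/Gradient.py (per the RECORD below) and that the unauthenticated chat.gradient.network endpoint is reachable; note that _Gradient_extractor only reads data.reasoningContent from chunks of type "reply", so the streamed text is whatever that field carries.

# Minimal usage sketch for the new provider (assumptions noted above).
from webscout.Provider.Gradient import Gradient

ai = Gradient(model="GPT-OSS-120B", cluster_mode="nvidia", enable_thinking=True, timeout=60)

# Streaming: chat() yields plain text chunks as they arrive.
for chunk in ai.chat("Explain SSE in one sentence", stream=True):
    print(chunk, end="", flush=True)

# Non-streaming: chat() returns the accumulated text.
print(ai.chat("Say hello"))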
webscout/Provider/TogetherAI.py CHANGED

@@ -1,142 +1,45 @@
 from curl_cffi.requests import Session
 from curl_cffi import CurlError
+import json
 from typing import Any, Dict, Optional, Generator, Union
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout.litagent import LitAgent
 
 class TogetherAI(Provider):
     """
-    A class to interact with the
+    A class to interact with the Together AI Chat API (https://chat.together.ai/).
+    Uses the chat interface API endpoint with model UUIDs.
     """
-
-
-
-
-
-    "
-    "
-    "
-    "
-
-        "Qwen/Qwen2.5-7B-Instruct-Turbo",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen2.5-VL-72B-Instruct",
-        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
-        "Qwen/Qwen3-235B-A22B-Thinking-2507",
-        "Qwen/Qwen3-235B-A22B-fp8-tput",
-        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
-        "Salesforce/Llama-Rank-V1",
-        "Virtue-AI/VirtueGuard-Text-Lite",
-        "arcee-ai/AFM-4.5B",
-        "arcee-ai/coder-large",
-        "arcee-ai/maestro-reasoning",
-        "arcee-ai/virtuoso-large",
-        "arcee_ai/arcee-spotlight",
-        "blackbox/meta-llama-3-1-8b",
-        "deepcogito/cogito-v2-preview-deepseek-671b",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-0528-tput",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-        "deepseek-ai/DeepSeek-V3",
-        "google/gemma-2-27b-it",
-        "google/gemma-3n-E4B-it",
-        "lgai/exaone-3-5-32b-instruct",
-        "lgai/exaone-deep-32b",
-        "marin-community/marin-8b-instruct",
-        "meta-llama/Llama-2-70b-hf",
-        "meta-llama/Llama-3-70b-chat-hf",
-        "meta-llama/Llama-3-8b-chat-hf",
-        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-Vision-Free",
-        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "mistralai/Mistral-7B-Instruct-v0.1",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "mistralai/Mistral-Small-24B-Instruct-2501",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "moonshotai/Kimi-K2-Instruct",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-        "perplexity-ai/r1-1776",
-        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
-        "togethercomputer/Refuel-Llm-V2-Small",
-        "zai-org/GLM-4.5-Air-FP8"
-    ]
-
-    @classmethod
-    def get_models(cls, api_key: str = None):
-        """Fetch available models from TogetherAI API.
-
-        Args:
-            api_key (str, optional): TogetherAI API key. If not provided, returns default models.
-
-        Returns:
-            list: List of available model IDs
-        """
-        if not api_key:
-            return cls.AVAILABLE_MODELS
-
-        try:
-            # Use a temporary curl_cffi session for this class method
-            temp_session = Session()
-            headers = {
-                "Content-Type": "application/json",
-                "Authorization": f"Bearer {api_key}",
-            }
-
-            response = temp_session.get(
-                "https://api.together.xyz/v1/models",
-                headers=headers,
-                impersonate="chrome110"
-            )
-
-            if response.status_code != 200:
-                return cls.AVAILABLE_MODELS
-
-            data = response.json()
-            if "data" in data and isinstance(data["data"], list):
-                return [model["id"] for model in data["data"]]
-            return cls.AVAILABLE_MODELS
-
-        except (CurlError, Exception):
-            # Fallback to default models list if fetching fails
-            return cls.AVAILABLE_MODELS
-
-    def update_available_models(self, api_key: str):
-        """Update available models by fetching from TogetherAI API.
-
-        Args:
-            api_key (str): TogetherAI API key for fetching models.
-        """
-        self.AVAILABLE_MODELS = self.get_models(api_key)
+    required_auth = False
+    AVAILABLE_MODELS = {
+        "DeepSeek R1 (0528)": "dc11fae1-a7a2-4bed-9bd5-bd31bd8f5053",
+        "DeepSeek V3 (0324)": "3e26fb3e-5b59-454d-b4af-dcd10d8c91a4",
+        "GPT OSS 120B": "34dd95c6-5b8b-42f8-b7a9-99cc01b27a39",
+        "Kimi K2 Instruct (0905)": "e91f96e5-fc2f-4e15-95af-43edaa1c6549",
+        "Qwen3 Coder 480B": "4fbbacb9-2b02-42db-827e-4c4dd6e12f84",
+        "GLM-4.5-Air": "37fb891c-1c2c-43f5-8cc0-f2a80b1f4a36",
+        "Llama 4 Maverick": "84b39408-ac91-43a0-a2f0-6e4fb72e9800",
+    }
 
     @staticmethod
-    def
-    """Extracts content from
+    def _together_ai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Together AI stream JSON objects."""
         if isinstance(chunk, dict):
-
+            # Extract from streaming response format
+            choices = chunk.get("choices", [])
+            if choices and len(choices) > 0:
+                delta = choices[0].get("delta", {})
+                if isinstance(delta, dict):
+                    return delta.get("content")
         return None
 
     def __init__(
         self,
-        api_key: str,
+        api_key: Optional[str] = None,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,

@@ -146,50 +49,60 @@ class TogetherAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "DeepSeek R1 (0528)",
         system_prompt: str = "You are a helpful assistant.",
+        temperature: float = 0.6,
+        top_p: float = 0.95,
+        browser: str = "chrome"
     ):
-        """Initializes the
-
-        Args:
-            api_key (str): TogetherAI API key.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 2049.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): LLM model name. Defaults to "meta-llama/Llama-3.1-8B-Instruct-Turbo".
-            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
-        """
-        # Update available models from API
-        self.update_available_models(api_key)
-
-        # Validate model after updating available models
+        """Initializes the Together AI chat client."""
         if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+            raise ValueError(f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}")
 
-        self.
-        self.
-        self.
-        self.
-        self.
-
-
-        self.
-        self.
+        self.url = "https://chat.together.ai/api/chat-completion"
+        self.model_name = model
+        self.model_id = self.AVAILABLE_MODELS[model]
+        self.temperature = temperature
+        self.top_p = top_p
+
+        # Initialize LitAgent
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.api = api_key
 
+        # Setup headers
         self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "
+            "Cache-Control": "no-cache",
+            "Origin": "https://chat.together.ai",
+            "Pragma": "no-cache",
+            "Referer": "https://chat.together.ai/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
         }
 
-
+        if self.api is not None:
+            self.headers["Authorization"] = f"Bearer {self.api}"
+
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
 
         self.__available_optimizers = (
             method

@@ -209,7 +122,26 @@ class TogetherAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+        })
 
+        self.session.headers.update(self.headers)
+        return self.fingerprint
 
     def ask(
         self,

@@ -219,9 +151,6 @@ class TogetherAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a prompt to the TogetherAI API and returns the response.
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:

@@ -231,46 +160,47 @@ class TogetherAI(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
+        # Payload construction
         payload = {
-            "
+            "modelId": self.model_id,
+            "temperature": self.temperature,
+            "topP": self.top_p,
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream
+            "stream": stream,
+            "maxTokens": self.max_tokens_to_sample,
+            "options": {}
         }
+
         def for_stream():
             streaming_text = ""
             try:
                 response = self.session.post(
-                    self.
-                    json
+                    self.url,
+                    data=json.dumps(payload),
                     stream=True,
                     timeout=self.timeout,
                     impersonate="chrome110"
                 )
                 response.raise_for_status()
+
                 processed_stream = sanitize_stream(
                     data=response.iter_content(chunk_size=None),
                     intro_value="data:",
                     to_json=True,
                     skip_markers=["[DONE]"],
-                    content_extractor=self.
-                    yield_raw_on_error=False
-                    raw=raw
+                    content_extractor=self._together_ai_extractor,
+                    yield_raw_on_error=False
                 )
+
                 for content_chunk in processed_stream:
-                    if isinstance(content_chunk,
-
-
-
-                        yield content_chunk
-                    else:
-                        if content_chunk and isinstance(content_chunk, str):
-                            streaming_text += content_chunk
-                            resp = dict(text=content_chunk)
-                            yield resp
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
             except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
             except Exception as e:

@@ -279,34 +209,39 @@ class TogetherAI(Provider):
                 if streaming_text:
                     self.last_response = {"text": streaming_text}
                     self.conversation.update_chat_history(prompt, streaming_text)
+
         def for_non_stream():
             try:
                 response = self.session.post(
-                    self.
-                    json
+                    self.url,
+                    data=json.dumps(payload),
                     timeout=self.timeout,
                     impersonate="chrome110"
                 )
                 response.raise_for_status()
+
                 response_text = response.text
+
                 processed_stream = sanitize_stream(
                     data=response_text,
                     to_json=True,
                     intro_value=None,
                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
-                    raw=raw
+                    yield_raw_on_error=False
                 )
-                content = next(
+                content = next(processed_stream, None)
                 content = content if isinstance(content, str) else ""
+
                 self.last_response = {"text": content}
                 self.conversation.update_chat_history(prompt, content)
                 return self.last_response if not raw else content
+
             except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
             except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
         return for_stream() if stream else for_non_stream()
 
     def chat(

@@ -315,43 +250,48 @@ class TogetherAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream_chat():
             gen = self.ask(
-                prompt, stream=True, raw=
+                prompt, stream=True, raw=False,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for
-
-
-            else:
-                yield self.get_message(response)
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
         def for_non_stream_chat():
             response_data = self.ask(
-                prompt, stream=False, raw=
+                prompt, stream=False, raw=False,
                 optimizer=optimizer, conversationally=conversationally
            )
-
-
-            else:
-                return self.get_message(response_data)
+            return self.get_message(response_data)
+
         return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
 if __name__ == "__main__":
-    print("-" *
+    print("-" * 100)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" *
+    print("-" * 100)
 
-    for
+    for model_name in TogetherAI.AVAILABLE_MODELS:
         try:
-
-
+            test_ai = TogetherAI(model=model_name, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model_name:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{
+            print(f"\r{model_name:<50} {'✗':<10} {str(e)[:50]}")
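The practical effect of this rewrite: the provider no longer requires a Together API key and no longer fetches a model list from api.together.xyz; it posts to the chat.together.ai chat-completion endpoint, selecting models by display name mapped to chat-interface UUIDs, with a LitAgent browser fingerprint and temperature/top_p controls. A minimal usage sketch under those assumptions:

# Usage sketch for the rewritten TogetherAI provider; model names and
# defaults come from the diff above, keyless operation is assumed to work.
from webscout.Provider.TogetherAI import TogetherAI

ai = TogetherAI(model="DeepSeek R1 (0528)", temperature=0.6, top_p=0.95)

for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)

# refresh_identity() rotates the fingerprint-derived headers; presumably
# useful if requests start being blocked (an assumption about its intent).
ai.refresh_identity()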
webscout/version.py CHANGED

@@ -1,2 +1,2 @@
-__version__ = "2025.10.17"
+__version__ = "2025.10.18"
 __prog__ = "webscout"
webscout/version.py.bak CHANGED

@@ -1,2 +1,2 @@
-__version__ = "2025.10.
+__version__ = "2025.10.17"
 __prog__ = "webscout"
{webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: webscout
-Version: 2025.10.17
+Version: 2025.10.18
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author-email: OEvortex <helpingai5@gmail.com>
 License: HelpingAI
{webscout-2025.10.17.dist-info → webscout-2025.10.18.dist-info}/RECORD CHANGED

@@ -14,8 +14,8 @@ webscout/prompt_manager.py,sha256=ysKFgPhkV3uqrOCilqcS9rG8xhzdU_d2wx0grC9WCCc,98
 webscout/sanitize.py,sha256=pw2Dzn-Jw9mOD4mpALYAvAf-medA-9AqdzsOmdXQbl0,46577
 webscout/update_checker.py,sha256=bz0TzRxip9DOIVMFyNz9HsGj4RKB0xZgo57AUVSJINo,3708
 webscout/utils.py,sha256=o2hU3qaVPk25sog3e4cyVZO3l8xwaZpYRziZPotEzNo,3075
-webscout/version.py,sha256=
-webscout/version.py.bak,sha256=
+webscout/version.py,sha256=WEk_fzNkSNIF2RkQXtSLmMbXYzM5oZVYXSNn6xYMp8w,51
+webscout/version.py.bak,sha256=3D9mFWY4P-ZGCKSuDPkkzHusmyndScdoNbUtry1vdCE,51
 webscout/Extra/Act.md,sha256=_C2VW_Dc-dc7eejpGYKAOZhImHKPiQ7NSwE3bkzr6fg,18952
 webscout/Extra/__init__.py,sha256=KvJRsRBRO-fZp2jSCl6KQnPppi93hriA6O_U1O1s31c,177
 webscout/Extra/gguf.md,sha256=McXGz5sTfzOO9X4mH8yIqu5K3CgjzyXKi4_HQtezdZ4,12435

@@ -78,6 +78,7 @@ webscout/Provider/GMI.py,sha256=NkFVwrGFPu_dqy4YWPAjgPHmNVX-jltYuBH623ooZvs,1136
 webscout/Provider/Gemini.py,sha256=Idpl9B_2yF2hK8agb6B4Qnvg6jmaQT008aOx8M2w2O4,6288
 webscout/Provider/GeminiProxy.py,sha256=JzOnUMNEcriTXbVZvp9SauYWx4ekgCj2DyRyD-jUj9M,6515
 webscout/Provider/GithubChat.py,sha256=FeRQfy1C9gxPlDmfH0VfBgd6CSCmN1XI6YES1Mp9mQM,14374
+webscout/Provider/Gradient.py,sha256=N8gZstvX5a4xAfaptMFug_HNzDd6TJm3jZSSlm9VfJg,8467
 webscout/Provider/Groq.py,sha256=0WM9Gvronx3KV3xyWVyzzgHxjB8DQczK89yg2d1XVDg,36104
 webscout/Provider/HeckAI.py,sha256=0QX8xOtMDwpx-ZsBvm-_4f7pbbiCNTtK1v-J-FBCvLQ,16297
 webscout/Provider/Jadve.py,sha256=qzAjLXSpeXtozslVAI0hf-5YjYmtY2BgUXq51jpiJKM,12005

@@ -96,7 +97,7 @@ webscout/Provider/Sambanova.py,sha256=BHavJ89LTAD3pJHhQ2xV8USJPoIxKLl0OgdkuXrFf-
 webscout/Provider/StandardInput.py,sha256=OcFdbPbWadbkTmECSiu1-RH97tzSQoJJYZnXA-FzudY,14800
 webscout/Provider/TeachAnything.py,sha256=qYEgn4slKI-kebcnG6UG0_YCmEq1KKlwiO72_qkHqMs,10206
 webscout/Provider/TextPollinationsAI.py,sha256=B9ZbgEAGreEnGKoEFJYXICJmWngDYsczYNeJlNQJujM,12873
-webscout/Provider/TogetherAI.py,sha256=
+webscout/Provider/TogetherAI.py,sha256=YDV6-IcQhyHEvbEbIazEMkhYyhvkEFBosYVfDTjRFu0,11794
 webscout/Provider/TwoAI.py,sha256=S_DBCv0RlV0c3RFdlzL_OgTfdVvua1bBOY12S8vYnZA,12788
 webscout/Provider/TypliAI.py,sha256=yZ7Ep6VsQzpAzlvrVYuLjr4QrqgllaGnGJw5MJwQo5k,11866
 webscout/Provider/Venice.py,sha256=zmr1xggyTx03UxuP9HT1gA7qnCXnDLKcZ-GNbKgFxnI,10033

@@ -329,9 +330,9 @@ webscout/zeroart/__init__.py,sha256=Cy9AUtXnOaFBQjNvCpN19IXJo7Lg15VTaNcTBxOTFek,
 webscout/zeroart/base.py,sha256=I-xhDEfArBb6q7hiF5oPoyXeu2hzL6orp7uWgS_YtG8,2299
 webscout/zeroart/effects.py,sha256=XUNZY1-wMPd6GNL3glFXtWaF9wDis_z55qTyCdnzHDo,5063
 webscout/zeroart/fonts.py,sha256=S7qDhUmDXl1makMreZl_eVW_7-sqVQiGn-kQKl0Hg_A,51006
-webscout-2025.10.
-webscout-2025.10.
-webscout-2025.10.
-webscout-2025.10.
-webscout-2025.10.
-webscout-2025.10.
+webscout-2025.10.18.dist-info/licenses/LICENSE.md,sha256=hyfFlVn7pWcrvuvs-piB8k4J8DlXdOsYje9RyPxc6Ik,7543
+webscout-2025.10.18.dist-info/METADATA,sha256=Qz4hrs-Q3oHXETc75nh8P7UKYBKttZxLfHgrBCt8gJs,21660
+webscout-2025.10.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+webscout-2025.10.18.dist-info/entry_points.txt,sha256=4xAgKHWwNhAvJyShLCFs_IU8Reb8zR3wqf8egrsDr8g,118
+webscout-2025.10.18.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-2025.10.18.dist-info/RECORD,,