webscout 8.1__py3-none-any.whl → 8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/__init__.py +6 -0
- inferno/__main__.py +9 -0
- inferno/cli.py +6 -0
- webscout/Local/__init__.py +6 -0
- webscout/Local/__main__.py +9 -0
- webscout/Local/api.py +576 -0
- webscout/Local/cli.py +338 -0
- webscout/Local/config.py +75 -0
- webscout/Local/llm.py +188 -0
- webscout/Local/model_manager.py +205 -0
- webscout/Local/server.py +187 -0
- webscout/Local/utils.py +93 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/AISEARCH/scira_search.py +8 -4
- webscout/Provider/ExaChat.py +18 -8
- webscout/Provider/GithubChat.py +5 -1
- webscout/Provider/Glider.py +4 -2
- webscout/Provider/OPENAI/__init__.py +8 -1
- webscout/Provider/OPENAI/chatgpt.py +549 -0
- webscout/Provider/OPENAI/exachat.py +20 -8
- webscout/Provider/OPENAI/glider.py +3 -1
- webscout/Provider/OPENAI/llmchatco.py +3 -1
- webscout/Provider/OPENAI/opkfc.py +488 -0
- webscout/Provider/OPENAI/scirachat.py +11 -7
- webscout/Provider/OPENAI/standardinput.py +425 -0
- webscout/Provider/OPENAI/textpollinations.py +285 -0
- webscout/Provider/OPENAI/toolbaz.py +405 -0
- webscout/Provider/OPENAI/uncovrAI.py +455 -0
- webscout/Provider/OPENAI/writecream.py +158 -0
- webscout/Provider/StandardInput.py +278 -0
- webscout/Provider/TextPollinationsAI.py +27 -28
- webscout/Provider/Writecream.py +211 -0
- webscout/Provider/WritingMate.py +197 -0
- webscout/Provider/Youchat.py +30 -26
- webscout/Provider/__init__.py +10 -2
- webscout/Provider/koala.py +2 -2
- webscout/Provider/llmchatco.py +5 -0
- webscout/Provider/scira_chat.py +5 -2
- webscout/Provider/scnet.py +187 -0
- webscout/Provider/toolbaz.py +320 -0
- webscout/Provider/uncovr.py +3 -3
- webscout/conversation.py +32 -32
- webscout/version.py +1 -1
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/METADATA +54 -3
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/RECORD +50 -25
- webscout-8.2.dist-info/entry_points.txt +5 -0
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
- webscout-8.1.dist-info/entry_points.txt +0 -3
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
- {webscout-8.1.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/WritingMate.py
ADDED

@@ -0,0 +1,197 @@
+import re
+import requests, json
+from typing import Union, Any, Dict, Generator, Optional
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class WritingMate(Provider):
+    AVAILABLE_MODELS = [
+        "claude-3-haiku-20240307",
+        "gemini-1.5-flash-latest",
+        "llama3-8b-8192",
+        "llama3-70b-8192",
+        "google/gemini-flash-1.5-8b-exp",
+        "gpt-4o-mini"
+    ]
+    """
+    Provider for WritingMate streaming API.
+    """
+    api_endpoint = "https://chat.writingmate.ai/api/chat/tools-stream"
+
+    def __init__(
+        self,
+        cookies_path: str = "cookies.json",
+        is_conversation: bool = True,
+        max_tokens: int = 4096,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        act: str = None,
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        model: str = "gpt-4o-mini"
+    ):
+        self.cookies_path = cookies_path
+        self.cookies = self._load_cookies(cookies_path)
+        self.session = requests.Session()
+        self.timeout = timeout
+        self.system_prompt = system_prompt
+        self.model = model
+        if self.model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Unknown model: {self.model}. Choose from {self.AVAILABLE_MODELS}")
+        self.last_response = {}
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Origin": "https://chat.writingmate.ai",
+            "Referer": "https://chat.writingmate.ai/chat",
+            "Cookie": self.cookies,
+            "DNT": "1",
+            "sec-ch-ua": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": LitAgent().random()
+        }
+        self.session.headers.update(self.headers)
+        self.__available_optimizers = (
+            m for m in dir(Optimizers)
+            if callable(getattr(Optimizers, m)) and not m.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act else intro or Conversation.intro
+        )
+        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+        self.conversation.history_offset = 10250
+
+    def _load_cookies(self, path: str) -> str:
+        try:
+            with open(path, 'r') as f:
+                data = json.load(f)
+            return '; '.join(f"{c['name']}={c['value']}" for c in data)
+        except (FileNotFoundError, json.JSONDecodeError):
+            raise RuntimeError(f"Failed to load cookies from {path}")
+
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = True,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False
+    ) -> Union[Dict[str,Any], Generator[Any,None,None]]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Unknown optimizer: {optimizer}")
+
+        body = {
+            "chatSettings": {
+                "model": self.model,
+                "prompt": self.system_prompt,
+                "temperature": 0.5,
+                "contextLength": 4096,
+                "includeProfileContext": True,
+                "includeWorkspaceInstructions": True,
+                "embeddingsProvider": "openai"
+            },
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "selectedTools": []
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=body, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else dict(text=content)
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False
+    ) -> Union[str, Generator[str,None,None]]:
+        if stream:
+            # yield raw SSE lines
+            def raw_stream():
+                for line in self.ask(
+                    prompt, stream=True, raw=True,
+                    optimizer=optimizer, conversationally=conversationally
+                ):
+                    yield line
+            return raw_stream()
+        # non-stream: return aggregated text
+        return self.get_message(
+            self.ask(
+                prompt,
+                False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = X0GPT()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+            >>> print(message)
+            'Why did the scarecrow win an award? Because he was outstanding in his field!'
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = WritingMate(cookies_path="cookies.json")
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/Youchat.py
CHANGED

@@ -53,11 +53,11 @@ class YouChat(Provider):
         "command_r_plus",

         # Free models not enabled for user chat modes
-        … (5 removed lines not shown in the diff view)
+        "llama3_3_70b",  # isAllowedForUserChatModes: false
+        "llama3_2_90b",  # isAllowedForUserChatModes: false
+        "databricks_dbrx_instruct",  # isAllowedForUserChatModes: false
+        "solar_1_mini",  # isAllowedForUserChatModes: false
+        "dolphin_2_5",  # isAllowedForUserChatModes: false, isUncensoredModel: true
     ]

     def __init__(

@@ -108,6 +108,7 @@ class YouChat(Provider):
             "Content-Type": "text/plain;charset=UTF-8",
         }
         self.cookies = {
+            "uuid_guest": uuid4().hex,
             "uuid_guest_backup": uuid4().hex,
             "youchat_personalization": "true",
             "youchat_smart_learn": "true",

@@ -188,9 +189,10 @@ class YouChat(Provider):
             "queryTraceId": trace_id,
             "chatId": trace_id,
             "conversationTurnId": conversation_turn_id,
-            "pastChatLength": 0,
-            "selectedChatMode": "
-            "
+            "pastChatLength": len(self.conversation.history) if hasattr(self.conversation, "history") else 0,
+            "selectedChatMode": "custom",
+            "selectedAiModel": self.model,
+            # "enable_agent_clarification_questions": "true",
             "traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
             "use_nested_youchat_updates": "true"
         }

@@ -217,29 +219,31 @@ class YouChat(Provider):
             )

             streaming_text = ""
-            … (2 removed lines not shown in the diff view)
+            # New SSE event-based parsing
+            event_type = None
             for value in response.iter_lines(
                 decode_unicode=True,
                 chunk_size=self.stream_chunk_size,
                 delimiter="\n",
             ):
-                … (16 removed lines not shown in the diff view)
+                if not value:
+                    continue
+                if value.startswith("event: "):
+                    event_type = value[7:].strip()
+                    continue
+                if value.startswith("data: "):
+                    data_str = value[6:]
+                    if event_type == "youChatToken":
+                        try:
+                            data = json.loads(data_str)
+                            token = data.get("youChatToken", "")
+                            if token:
+                                streaming_text += token
+                                yield token if raw else dict(text=token)
+                        except Exception:
+                            pass
+                    # Reset event_type after processing
+                    event_type = None

             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
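
The rewritten loop swaps the old ad-hoc parsing for standard SSE framing: an "event: " line names the event type, the following "data: " line carries a JSON payload, and only youChatToken events contribute text. A self-contained sketch of that pattern, run against made-up sample lines (not captured API traffic):

import json

# Illustrative SSE payload in the shape the new YouChat loop parses;
# the token values are invented for the example.
sample_lines = [
    'event: youChatToken',
    'data: {"youChatToken": "Hello"}',
    '',
    'event: youChatToken',
    'data: {"youChatToken": " world"}',
    '',
    'event: done',
    'data: {}',
]

event_type = None
text = ""
for line in sample_lines:
    if not line:
        continue
    if line.startswith("event: "):
        event_type = line[7:].strip()
        continue
    if line.startswith("data: ") and event_type == "youChatToken":
        text += json.loads(line[6:]).get("youChatToken", "")
        event_type = None

print(text)  # -> Hello world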
webscout/Provider/__init__.py
CHANGED

@@ -91,11 +91,16 @@ from .searchchat import *
 from .ExaAI import ExaAI
 from .OpenGPT import OpenGPT
 from .scira_chat import *
-from .
+from .StandardInput import *
+from .Writecream import Writecream
+from .toolbaz import Toolbaz
+from .scnet import SCNet
+from .WritingMate import WritingMate
 __all__ = [
     'LLAMA',
-    '
+    'SCNet',
     'SciraAI',
+    'StandardInputAI',
     'LabyrinthAI',
     'OpenGPT',
     'C4ai',

@@ -109,6 +114,7 @@ __all__ = [
     'PerplexityLabs',
     'AkashGPT',
     'DeepSeek',
+    'WritingMate',
     'WiseCat',
     'IBMGranite',
     'QwenLM',

@@ -187,4 +193,6 @@ __all__ = [
     'AskSteve',
     'Aitopia',
     'SearchChatAI',
+    'Writecream',
+    'Toolbaz'
 ]
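
With these re-exports in place, the new providers become importable directly from webscout.Provider. A quick smoke check (assuming webscout 8.2 is installed):

# Should import cleanly on webscout 8.2 if the exports above landed as shown.
from webscout.Provider import WritingMate, SCNet, Writecream, Toolbaz

print(WritingMate.AVAILABLE_MODELS[0])    # "claude-3-haiku-20240307"
print(SCNet.MODEL_NAME_TO_ID["QWQ-32B"])  # 7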
webscout/Provider/koala.py
CHANGED
webscout/Provider/llmchatco.py
CHANGED
webscout/Provider/scira_chat.py
CHANGED

@@ -20,8 +20,11 @@ class SciraAI(Provider):
         "scira-default": "Grok3",
         "scira-grok-3-mini": "Grok3-mini", # thinking model
         "scira-vision" : "Grok2-Vision", # vision model
-        "scira-
-        "scira-
+        "scira-4.1-mini": "GPT4.1-mini",
+        "scira-qwq": "QWQ-32B",
+        "scira-o4-mini": "o4-mini",
+        "scira-google": "gemini 2.5 flash"
+

     }

webscout/Provider/scnet.py
ADDED

@@ -0,0 +1,187 @@
+import requests
+import json
+import secrets
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class SCNet(Provider):
+    """
+    Provider for SCNet chatbot API.
+    """
+    AVAILABLE_MODELS = [
+        {"modelId": 2, "name": "Deepseek-r1-7B"},
+        {"modelId": 3, "name": "Deepseek-r1-32B"},
+        {"modelId": 5, "name": "Deepseek-r1-70B"},
+        {"modelId": 7, "name": "QWQ-32B"},
+        {"modelId": 8, "name": "minimax-text-01-456B"},
+        # Add more models here as needed
+    ]
+    MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
+    MODEL_ID_TO_NAME = {m["modelId"]: m["name"] for m in AVAILABLE_MODELS}
+
+    def __init__(
+        self,
+        model: str = "QWQ-32B",
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        proxies: Optional[dict] = None,
+        history_offset: int = 0,
+        act: Optional[str] = None,
+        system_prompt: str = (
+            "You are a helpful, advanced LLM assistant. "
+            "You must always answer in English, regardless of the user's language. "
+            "If the user asks in another language, politely respond in English only. "
+            "Be clear, concise, and helpful."
+        ),
+    ):
+        if model not in self.MODEL_NAME_TO_ID:
+            raise ValueError(f"Invalid model: {model}. Choose from: {list(self.MODEL_NAME_TO_ID.keys())}")
+        self.model = model
+        self.modelId = self.MODEL_NAME_TO_ID[model]
+        self.system_prompt = system_prompt
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response: Dict[str, Any] = {}
+        self.proxies = proxies or {}
+        self.cookies = {
+            "Token": secrets.token_hex(16),
+        }
+        self.headers = {
+            "accept": "text/event-stream",
+            "content-type": "application/json",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
+            "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
+            "origin": "https://www.scnet.cn",
+        }
+        self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+        payload = {
+            "conversationId": "",
+            "content": f"SYSTEM: {self.system_prompt} USER: {conversation_prompt}",
+            "thinking": 0,
+            "online": 0,
+            "modelId": self.modelId,
+            "textFile": [],
+            "imageFile": [],
+            "clusterId": ""
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    self.url,
+                    headers=self.headers,
+                    cookies=self.cookies,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    proxies=self.proxies
+                ) as resp:
+                    streaming_text = ""
+                    for line in resp.iter_lines(decode_unicode=True):
+                        if line and line.startswith("data:"):
+                            data = line[5:].strip()
+                            if data and data != "[done]":
+                                try:
+                                    obj = json.loads(data)
+                                    content = obj.get("content", "")
+                                    streaming_text += content
+                                    yield {"text": content} if raw else {"text": content}
+                                except Exception:
+                                    continue
+                            elif data == "[done]":
+                                break
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            text = ""
+            for chunk in for_stream():
+                text += chunk["text"]
+            return {"text": text}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
+    print("-" * 80)
+    for model in SCNet.AVAILABLE_MODELS:
+        try:
+            test_ai = SCNet(model=model["name"], timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")