webscout 7.0-py3-none-any.whl → 7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
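
Beyond line-count churn, the list shows the notable structural changes in 7.2: five new providers (ChatGPTGratis, DeepSeek, QwenLM, WiseCat, granite), the removal of RUBIKSAI, and a rewrite of Litlogger from a single 681-line __init__.py into core, handlers, styles, and utils subpackages. A minimal sketch of the restructured logger, using only the names the dgaf.py diff below actually imports and calls (Logger, LogFormat.MODERN_EMOJI, and the info/debug/error methods); anything else about the Litlogger API is an assumption:

    # Sketch only: the import path, constructor arguments, and methods here
    # are exactly those exercised by webscout/Provider/dgaf.py in this diff.
    from webscout.Litlogger import Logger, LogFormat

    log = Logger(name="demo", format=LogFormat.MODERN_EMOJI)
    log.info("Litlogger is now split into core/, handlers/, styles/, and utils/ subpackages")
    log.debug("debug() and error() are also used by the DGAFAI provider below")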
webscout/Provider/chatglm.py
CHANGED
@@ -1,205 +1,205 @@
(lines 1-204 are removed and re-added with identical text, so the visible change is nil — most likely a whitespace or line-ending rewrite; line 205 is unchanged. The file content, shown once:)

import requests
import json
from typing import Any, Dict, Optional, Generator, List, Union
import uuid

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions


class ChatGLM(Provider):
    """
    A class to interact with the ChatGLM API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "all-tools-230b",
    ):
        """Initializes the ChatGLM API client."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'App-Name': 'chatglm',
            'Authorization': 'undefined',
            'Content-Type': 'application/json',
            'Origin': 'https://chatglm.cn',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'X-App-Platform': 'pc',
            'X-App-Version': '0.0.1',
            'X-Device-Id': '',  # Will be generated each time
            'Accept': 'text/event-stream',
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Chat with AI
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            Union[Dict, Generator[Dict, None, None]]: Response generated
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )
        device_id = str(uuid.uuid4()).replace('-', '')
        self.session.headers.update({'X-Device-Id': device_id})
        payload = {
            "assistant_id": "65940acff94777010aa6b796",
            "conversation_id": "",
            "meta_data": {
                "if_plus_model": False,
                "is_test": False,
                "input_question_type": "xxxx",
                "channel": "",
                "draft_id": "",
                "quote_log_id": "",
                "platform": "pc",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": conversation_prompt}],
                }
            ],
        }

        def for_stream():
            try:
                with self.session.post(
                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout
                ) as response:
                    response.raise_for_status()

                    streaming_text = ""
                    last_processed_content = ""  # Track the last processed content
                    for chunk in response.iter_lines():
                        if chunk:
                            decoded_chunk = chunk.decode('utf-8')
                            if decoded_chunk.startswith('data: '):
                                try:
                                    json_data = json.loads(decoded_chunk[6:])
                                    parts = json_data.get('parts', [])
                                    if parts:
                                        content = parts[0].get('content', [])
                                        if content:
                                            text = content[0].get('text', '')
                                            new_text = text[len(last_processed_content):]
                                            if new_text:  # Check for new content
                                                streaming_text += new_text
                                                last_processed_content = text
                                                yield new_text if raw else dict(text=new_text)
                                except json.JSONDecodeError:
                                    continue

                    self.last_response.update(dict(text=streaming_text))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )

            except requests.exceptions.RequestException as e:
                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
            except json.JSONDecodeError as e:
                raise exceptions.InvalidResponseError(f"Failed to decode JSON: {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response `str`"""

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print
    ai = ChatGLM()
    response = ai.chat(input(">>> "), stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
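
Although the chatglm.py hunk is textually a no-op rewrite, it documents the provider's streaming design: ChatGLM's SSE endpoint returns the cumulative response text in each "data:" event, so for_stream() keeps last_processed_content and yields only the newly appended suffix. A usage sketch based on the module's own __main__ block (the guest endpoint shown above needs no API key, only network access):

    # Usage sketch mirroring the file's __main__ block.
    from webscout.Provider.chatglm import ChatGLM

    ai = ChatGLM(timeout=30)  # model defaults to "all-tools-230b"
    for chunk in ai.chat("Explain SSE in one sentence.", stream=True):
        # each chunk is only the newly appended text, not the cumulative reply
        print(chunk, end="", flush=True)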
webscout/Provider/dgaf.py
CHANGED
@@ -8,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import Logger, LogFormat
 
 class DGAFAI(Provider):
     """
-    A class to interact with the DGAF.ai API.
+    A class to interact with the DGAF.ai API with logging capabilities.
     """
 
     def __init__(
@@ -25,10 +27,10 @@ class DGAFAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the DGAFAI API client."""
+        """Initializes the DGAFAI API client with logging support."""
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -37,6 +39,7 @@ class DGAFAI(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+
         self.headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
@@ -52,17 +55,13 @@
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": (
-                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                "AppleWebKit/537.36 (KHTML, like Gecko) "
-                "Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0"
-            ),
+            "user-agent": LitAgent().random(),
         }
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
+
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         Conversation.intro = (
@@ -77,6 +76,15 @@ class DGAFAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+        # Initialize logger if enabled
+        self.logger = Logger(
+            name="DGAFAI",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("DGAFAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -85,39 +93,49 @@ class DGAFAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[str, None, None]:
-        """Chat with AI
+        """Chat with AI.
+
         Args:
-            prompt (str): Prompt to be
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional):
-            optimizer (str, optional): Prompt optimizer name
+            raw (bool, optional): Return raw streaming response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            Union[Dict, Generator[Dict, None, None]]:
+            Union[Dict, Generator[Dict, None, None]]: Generated response.
         """
+        if self.logger:
+            self.logger.debug(f"Processing ask call with prompt: {prompt[:50]}...")
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
+                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                 )
 
         payload = {
             "messages": [
-
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt}
             ]
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DGAF.ai API...")
             try:
                 with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
-                    response.raise_for_status()
-
+                    response.raise_for_status()  # Check for HTTP errors
+                    if self.logger:
+                        self.logger.debug(response.text)
                     streaming_text = ""
                     for line in response.iter_lines(decode_unicode=True):
                         if line:
@@ -126,14 +144,18 @@ class DGAFAI(Provider):
                             content = match.group(1)
                             if content:
                                 streaming_text += content
+                                # if self.logger:
+                                #     self.logger.debug(f"Received content: {content[:30]}...")
                                 yield content if raw else dict(text=content)
-
                     self.last_response.update(dict(text=streaming_text))
                     self.conversation.update_chat_history(
                         prompt, self.get_message(self.last_response)
                     )
-
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
             except requests.exceptions.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request error: {e}")
                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
 
         def for_non_stream():
@@ -151,36 +173,42 @@ class DGAFAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response
+        """Generate chat response as a string.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversational mode when using optimizer. Defaults to False.
+        Returns:
+            str or Generator[str, None, None]: Generated response.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat method invoked with prompt: {prompt[:50]}...")
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
         def for_non_stream():
             return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
             )
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response from the ask method.
+        Returns:
+            str: Extracted message.
+        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
-    # @staticmethod
-    # def clean_content(text: str) -> str:
-    #     cleaned_text = re.sub(r'\[REF\]\(https?://[^\s]*\)', '', text)
-    #     return cleaned_text
 
 if __name__ == "__main__":
     from rich import print
-    ai = DGAFAI()
+    ai = DGAFAI(logging=False)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)