webscout-7.1-py3-none-any.whl → webscout-7.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +166 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +460 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/AISEARCH/ISou.py +277 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/Blackboxai.py +3 -3
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +3 -4
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +40 -24
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/Groq.py +5 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Marcus.py +191 -192
- webscout/Provider/Netwrck.py +3 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +2 -3
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +28 -8
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +146 -134
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +2 -3
- webscout/Provider/freeaichat.py +221 -0
- webscout/Provider/gaurish.py +2 -3
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +3 -3
- webscout/Provider/llmchat.py +2 -3
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -331
- webscout/Provider/typegpt.py +359 -359
- webscout/Provider/yep.py +3 -3
- webscout/__init__.py +1 -0
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +87 -6
- webscout/webscout_search_async.py +58 -1
- webscout/yep_search.py +297 -0
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
- webscout-7.3.dist-info/RECORD +223 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout-7.1.dist-info/RECORD +0 -198
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
webscout/Provider/chatglm.py
CHANGED
@@ -1,205 +1,205 @@
Lines 1-204 are removed and re-added with identical text (an apparent whitespace or line-ending-only change); only line 205 is carried over unchanged, so the file content appears once below.

import requests
import json
from typing import Any, Dict, Optional, Generator, List, Union
import uuid

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions


class ChatGLM(Provider):
    """
    A class to interact with the ChatGLM API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "all-tools-230b",
    ):
        """Initializes the ChatGLM API client."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'App-Name': 'chatglm',
            'Authorization': 'undefined',
            'Content-Type': 'application/json',
            'Origin': 'https://chatglm.cn',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'X-App-Platform': 'pc',
            'X-App-Version': '0.0.1',
            'X-Device-Id': '',  # Will be generated each time
            'Accept': 'text/event-stream',
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Chat with AI
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            Union[Dict, Generator[Dict, None, None]]: Response generated
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )
        device_id = str(uuid.uuid4()).replace('-', '')
        self.session.headers.update({'X-Device-Id': device_id})
        payload = {
            "assistant_id": "65940acff94777010aa6b796",
            "conversation_id": "",
            "meta_data": {
                "if_plus_model": False,
                "is_test": False,
                "input_question_type": "xxxx",
                "channel": "",
                "draft_id": "",
                "quote_log_id": "",
                "platform": "pc",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": conversation_prompt}],
                }
            ],
        }

        def for_stream():
            try:
                with self.session.post(
                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout
                ) as response:
                    response.raise_for_status()

                    streaming_text = ""
                    last_processed_content = ""  # Track the last processed content
                    for chunk in response.iter_lines():
                        if chunk:
                            decoded_chunk = chunk.decode('utf-8')
                            if decoded_chunk.startswith('data: '):
                                try:
                                    json_data = json.loads(decoded_chunk[6:])
                                    parts = json_data.get('parts', [])
                                    if parts:
                                        content = parts[0].get('content', [])
                                        if content:
                                            text = content[0].get('text', '')
                                            new_text = text[len(last_processed_content):]
                                            if new_text:  # Check for new content
                                                streaming_text += new_text
                                                last_processed_content = text
                                                yield new_text if raw else dict(text=new_text)
                                except json.JSONDecodeError:
                                    continue

                    self.last_response.update(dict(text=streaming_text))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )

            except requests.exceptions.RequestException as e:
                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
            except json.JSONDecodeError as e:
                raise exceptions.InvalidResponseError(f"Failed to decode JSON: {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response `str`"""

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print
    ai = ChatGLM()
    response = ai.chat(input(">>> "), stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
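For orientation, a minimal usage sketch of the provider above; it assumes ChatGLM is imported from its module path in the wheel (webscout.Provider.chatglm) and is illustrative rather than part of the diff.

# Minimal usage sketch (illustrative; assumes the module path webscout.Provider.chatglm).
from webscout.Provider.chatglm import ChatGLM

ai = ChatGLM(timeout=60)

# Non-streaming: chat() consumes the stream internally and returns one string.
print(ai.chat("What is ChatGLM?"))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Summarize SSE in one sentence", stream=True):
    print(chunk, end="", flush=True)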
webscout/Provider/dgaf.py
CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent
-from webscout.Litlogger import
+from webscout.Litlogger import Logger, LogFormat
 
 class DGAFAI(Provider):
     """
@@ -77,10 +77,9 @@ class DGAFAI(Provider):
         self.conversation.history_offset = history_offset
 
         # Initialize logger if enabled
-        self.logger =
+        self.logger = Logger(
             name="DGAFAI",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
         ) if logging else None
 
         if self.logger:
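The Litlogger hunks here and in gaurish.py below apply the same repair: 7.1 shipped with the import and the Logger(...) call truncated, and 7.3 restores them while dropping the color_scheme argument. A minimal sketch of the repaired pattern, assuming Logger and LogFormat from webscout.Litlogger behave as the providers call them:

# Sketch of the shared Litlogger pattern (assumes Logger/LogFormat work as called by the providers).
from webscout.Litlogger import Logger, LogFormat

logging_enabled = True
logger = Logger(
    name="DGAFAI",
    format=LogFormat.MODERN_EMOJI,  # color_scheme=ColorScheme.CYBERPUNK is no longer passed in 7.3
) if logging_enabled else None

if logger:
    logger.info("provider initialized")  # every log call in the providers is guarded like this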
webscout/Provider/freeaichat.py
ADDED
@@ -0,0 +1,221 @@

import requests
import json
import time
from typing import Any, Dict, Optional, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider, AsyncProvider
from webscout import exceptions
from webscout import LitAgent
from webscout.Litlogger import Logger, LogFormat

class FreeAIChat(Provider):
    """
    A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
    """

    AVAILABLE_MODELS = [
        "mistral-nemo",
        "mistral-large",
        "llama3.1-70b-fast",
        "gemini-2.0-flash",
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        "gemini-2.0-pro-exp-02-05",
        "deepseek-r1",
        "deepseek-v3",
        "Deepseek r1 14B",
        "Deepseek r1 32B",
        "o3-mini-high",
        "o3-mini-medium",
        "o3-mini-low",
        "o3-mini",
        "GPT-4o-mini",
        "o1",
        "o1-mini",
        "GPT-4o"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "GPT-4o",
        system_prompt: str = "You are a helpful AI assistant.",
        logging: bool = False
    ):
        """Initializes the FreeAIChat API client with logging support."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://freeaichatplayground.com/api/v1/chat/completions"
        self.headers = {
            'User-Agent': LitAgent().random(),
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'Origin': 'https://freeaichatplayground.com',
            'Referer': 'https://freeaichatplayground.com/',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin'
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        self.logger = Logger(
            name="FreeAIChat",
            format=LogFormat.MODERN_EMOJI,
        ) if logging else None

        if self.logger:
            self.logger.info(f"FreeAIChat initialized successfully with model: {model}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
                if self.logger:
                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
                if self.logger:
                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        messages = [
            {
                "role": "system",
                "content": self.system_prompt
            },
            {
                "role": "user",
                "content": conversation_prompt
            }
        ]

        payload = {
            "model": self.model,
            "messages": messages
        }

        def for_stream():
            if self.logger:
                self.logger.debug("Sending streaming request to FreeAIChat API...")
            try:
                with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        if self.logger:
                            self.logger.error(f"Request failed with status code {response.status_code}")
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if line:
                            line = line.strip()
                            if line.startswith("data: "):
                                json_str = line[6:]  # Remove "data: " prefix
                                if json_str == "[DONE]":
                                    break
                                try:
                                    json_data = json.loads(json_str)
                                    if 'choices' in json_data:
                                        choice = json_data['choices'][0]
                                        if 'delta' in choice and 'content' in choice['delta']:
                                            content = choice['delta']['content']
                                            streaming_text += content
                                            resp = dict(text=content)
                                            yield resp if raw else resp
                                except json.JSONDecodeError:
                                    if self.logger:
                                        self.logger.error("JSON decode error in streaming data")
                                    pass

                    self.conversation.update_chat_history(prompt, streaming_text)
                    if self.logger:
                        self.logger.info("Streaming response completed successfully")

            except requests.RequestException as e:
                if self.logger:
                    self.logger.error(f"Request failed: {e}")
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    from rich import print
    ai = FreeAIChat(model="GPT-4o", logging=True)
    response = ai.chat("Write a hello world program in Python", stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
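For context, for_stream() in the new FreeAIChat provider parses OpenAI-style server-sent events. A small sketch of the line format it accepts, using a hypothetical payload value:

# Hypothetical SSE line handled the way for_stream() above handles it (illustrative only).
import json

sample = 'data: {"choices": [{"delta": {"content": "Hello"}}]}'
payload = sample[6:]  # strip the "data: " prefix, as the provider does
if payload != "[DONE]":
    data = json.loads(payload)
    print(data["choices"][0]["delta"]["content"])  # -> Hello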
webscout/Provider/gaurish.py
CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from webscout import LitAgent
-from webscout.Litlogger import
+from webscout.Litlogger import Logger, LogFormat
 
 class GaurishCerebras(Provider):
     """
@@ -31,10 +31,9 @@ class GaurishCerebras(Provider):
         logging: bool = False
     ):
         """Initializes the Gaurish Cerebras API client with logging capabilities."""
-        self.logger =
+        self.logger = Logger(
             name="GaurishCerebras",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
         ) if logging else None
 
         if self.logger: