webscout 7.1__py3-none-any.whl → 7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout may be problematic; see the registry's release advisory for details.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +3 -3
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +3 -4
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +3 -3
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/Groq.py +5 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Marcus.py +191 -192
- webscout/Provider/Netwrck.py +3 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +2 -3
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +2 -3
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -134
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +2 -3
- webscout/Provider/gaurish.py +2 -3
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +3 -3
- webscout/Provider/llmchat.py +2 -3
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -331
- webscout/Provider/typegpt.py +359 -359
- webscout/Provider/yep.py +2 -2
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +5 -4
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout-7.1.dist-info/RECORD +0 -198
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
|
|
2
|
+
import requests
|
|
3
|
+
import json
|
|
4
|
+
from typing import Any, Dict, Generator, Optional
|
|
5
|
+
import uuid
|
|
6
|
+
import re
|
|
7
|
+
|
|
8
|
+
import cloudscraper
|
|
9
|
+
|
|
10
|
+
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
|
|
11
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
12
|
+
from webscout import exceptions
|
|
13
|
+
|
|
14
|
+
# Import logging tools from our internal modules
|
|
15
|
+
from webscout.Litlogger import Logger, LogFormat
|
|
16
|
+
from webscout import LitAgent as Lit
|
|
17
|
+
|
|
18
|
+
class QwenLM(Provider):
    """Provider for the QwenLM (chat.qwenlm.ai) chat-completions API.

    Authentication uses a browser cookie export: a JSON file containing a
    list of ``{"name": ..., "value": ...}`` objects. The cookie named
    ``token`` is forwarded as a Bearer token on every request.
    """

    # Models accepted by the default t2t (chatbot) endpoint.
    AVAILABLE_MODELS = [
        "qwen-max-latest",
        "qwen-plus-latest",
        "qwen2.5-14b-instruct-1m",
        "qwq-32b-preview",
        "qwen2.5-coder-32b-instruct",
        "qwen-turbo-latest",
        "qwen2.5-72b-instruct",
        "qwen2.5-vl-72b-instruct",
        "qvq-72b-preview"
    ]

    def __init__(
        self,
        cookies_path: str,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[dict] = None,
        history_offset: int = 10250,
        act: Optional[str] = None,
        model: str = "qwen-plus-latest",
        system_prompt: str = "You are a helpful AI assistant.",
        logging: bool = False  # New parameter to enable logging
    ):
        """Initializes the QwenLM API client with optional logging.

        Args:
            cookies_path: Path to the JSON cookie export for chat.qwenlm.ai.
            is_conversation: Keep a rolling conversation history.
            max_tokens: Value sent as ``max_tokens`` in the request payload.
            timeout: Per-request timeout in seconds.
            intro: Optional conversation intro text (used when ``act`` is not given).
            filepath: Optional path used by ``Conversation`` to persist history.
            update_file: Whether ``Conversation`` appends new turns to ``filepath``.
            proxies: Optional proxy mapping for the HTTP session (defaults to none).
            history_offset: Character budget for conversation-history truncation.
            act: Optional AwesomePrompts persona key used as the intro.
            model: One of ``AVAILABLE_MODELS``.
            system_prompt: System message sent with every request.
            logging: When True, emit debug/info logs via Litlogger.

        Raises:
            ValueError: If ``model`` is not one of ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
            )

        # Setup logger if logging is enabled; every log call below must be
        # guarded with `if self.logger`, because it is None otherwise.
        self.logger = Logger(
            name="QwenLM",
            format=LogFormat.MODERN_EMOJI,
        ) if logging else None

        if self.logger:
            self.logger.info(f"Initializing QwenLM with model: {model}")

        # cloudscraper handles the Cloudflare challenge on chat.qwenlm.ai.
        self.session = cloudscraper.create_scraper()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.qwenlm.ai/api/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.cookies_path = cookies_path
        self.cookie_string, self.token = self._load_cookies()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://chat.qwenlm.ai",
            "referer": "https://chat.qwenlm.ai/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
            "authorization": f"Bearer {self.token}" if self.token else '',
        }
        self.session.headers.update(self.headers)
        # Avoid the shared mutable-default pitfall: `proxies` defaults to
        # None and is replaced by a fresh empty dict per instance.
        self.session.proxies = proxies if proxies is not None else {}
        self.chat_type = "t2t"  # search - used WEB, t2t - chatbot, t2i - image_gen
        # NOTE: the previous revision had a dead `if self.chat_type != "t2t":`
        # branch here that assigned a throwaway local; it was removed because
        # the condition can never be true at this point and the assignment
        # had no effect.

        # Materialize as a tuple: a generator would be exhausted after the
        # first membership test in ask(), silently rejecting every later
        # optimizer lookup.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method))
            and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        if self.logger:
            self.logger.info("QwenLM initialized successfully")

    def _load_cookies(self) -> tuple[str, str]:
        """Load cookies from a JSON file and build a cookie header string.

        Returns:
            A ``(cookie_string, token)`` pair where ``cookie_string`` is a
            ``name=value; ...`` header value and ``token`` is the value of
            the cookie named "token" (empty string when absent).

        Raises:
            exceptions.InvalidAuthenticationError: If the cookie file is
                missing or contains invalid JSON.
        """
        try:
            with open(self.cookies_path, "r") as f:
                cookies = json.load(f)
            cookie_string = "; ".join(
                f"{cookie['name']}={cookie['value']}" for cookie in cookies
            )
            token = next(
                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
                "",
            )
            if self.logger:
                self.logger.debug("Cookies loaded successfully")
            return cookie_string, token
        except FileNotFoundError:
            if self.logger:
                self.logger.error("cookies.json file not found!")
            raise exceptions.InvalidAuthenticationError(
                "Error: cookies.json file not found!"
            )
        except json.JSONDecodeError:
            if self.logger:
                self.logger.error("Invalid JSON format in cookies.json!")
            raise exceptions.InvalidAuthenticationError(
                "Error: Invalid JSON format in cookies.json!"
            )

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Chat with AI and log the steps if logging is enabled.

        Args:
            prompt: User prompt for this turn.
            stream: When True, return a generator of incremental chunks.
            raw: When True, streamed chunks are plain strings instead of
                ``{"text": ...}`` dicts.
            optimizer: Optional name of an ``Optimizers`` method applied to
                the prompt before sending.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.

        Returns:
            ``{"text": ...}`` for non-streaming calls, or a generator of
            chunks for streaming calls.

        Raises:
            Exception: If ``optimizer`` is not a known optimizer name.
            exceptions.FailedToGenerateResponseError: On a non-2xx API reply.
        """
        if self.logger:
            self.logger.debug(f"Processing ask() request. Prompt: {prompt[:50]}...")
            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
                if self.logger:
                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
                if self.logger:
                    self.logger.error(f"Invalid optimizer: {optimizer}")
                raise Exception(
                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                )

        payload = {
            'chat_type': self.chat_type,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
            "stream": stream,
            "max_tokens": self.max_tokens_to_sample
        }

        def for_stream() -> Generator[Dict[str, Any], None, None]:
            # Streams SSE lines; the API resends the cumulative text, so we
            # diff against what we have already yielded to produce deltas.
            if self.logger:
                self.logger.debug("Sending streaming request to QwenLM API")

            response = self.session.post(
                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
            )
            if not response.ok:
                if self.logger:
                    self.logger.error(f"API request failed - Status: {response.status_code}, Reason: {response.reason}")
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            cumulative_text = ""
            for line in response.iter_lines(decode_unicode=True):
                if line and line.startswith("data: "):
                    data = line[6:]
                    if data == "[DONE]":
                        if self.logger:
                            self.logger.debug("Stream finished with [DONE] marker")
                        break
                    try:
                        json_data = json.loads(data)
                        # Handle multiple response formats: OpenAI-style
                        # "choices" deltas, or a "messages" array whose last
                        # assistant entry carries the cumulative content.
                        if "choices" in json_data:
                            new_content = json_data.get("choices")[0].get("delta", {}).get("content", "")
                        elif "messages" in json_data:
                            assistant_msg = next(
                                (msg for msg in reversed(json_data["messages"]) if msg.get("role") == "assistant"),
                                {}
                            )
                            content_field = assistant_msg.get("content", "")
                            if isinstance(content_field, list):
                                new_content = "".join(item.get("text", "") for item in content_field)
                            else:
                                new_content = content_field
                        else:
                            new_content = ""
                        delta = new_content[len(cumulative_text):]
                        cumulative_text = new_content
                        if delta:
                            if self.logger:
                                self.logger.debug(f"Yielding delta: {delta}")
                            yield delta if raw else {"text": delta}
                    except json.JSONDecodeError:
                        if self.logger:
                            self.logger.error("JSON decode error during streaming")
                        continue
            self.last_response.update(dict(text=cumulative_text))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )
            if self.logger:
                self.logger.debug("Finished processing stream response")

        def for_non_stream() -> Dict[str, Any]:
            """Aggregate all streamed chunks into a single response dict."""
            if self.logger:
                self.logger.debug("Processing non-streaming request")

            full_response = ""
            try:
                # Reuse the streaming path; on completion it also sets
                # last_response and records this turn in the history.
                for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                    if isinstance(response, dict):
                        full_response += response.get("text", "")
                    elif isinstance(response, str):
                        full_response += response
            except Exception as e:
                # Guard the logger: it is None when logging was disabled.
                if self.logger:
                    self.logger.error(f"Error processing response: {str(e)}")
                raise

            # Ensure last_response reflects the aggregated text.
            self.last_response.update({"text": full_response})
            # NOTE: the conversation history was already updated by the
            # inner streaming call above; updating it again here would
            # record the same turn twice.

            if self.logger:
                self.logger.debug(f"Non-streaming response: {full_response}")

            return {"text": full_response}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response string from chat, with logging if enabled.

        Returns the full reply as a string, or a generator of string
        chunks when ``stream`` is True.
        """
        if self.logger:
            self.logger.debug(f"Processing chat() request. Prompt: {prompt[:50]}...")

        def for_stream() -> Generator[str, None, None]:
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield response if isinstance(response, str) else response["text"]

        def for_non_stream() -> str:
            result = self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            return self.get_message(result)

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extracts the message content from a response dict."""
        assert isinstance(response, dict), "Response should be a dict"
        return response.get("text", "")
|
302
|
+
|
|
303
|
+
if __name__ == "__main__":
    # Manual smoke test: read one prompt from stdin and pretty-print the reply.
    from rich import print

    client = QwenLM(cookies_path="cookies.json", logging=False)
    reply = client.chat(input(">>> "), stream=False)
    client.chat_type = "search"  # search - used WEB, t2t - chatbot, t2i - image_gen
    print(reply)
|
|
@@ -1,22 +1,22 @@
|
|
|
1
|
-
"""
|
|
2
|
-
AiForce - Your go-to provider for generating fire images! 🔥
|
|
3
|
-
|
|
4
|
-
Examples:
|
|
5
|
-
>>> # Sync Usage
|
|
6
|
-
>>> from webscout import AiForceimager
|
|
7
|
-
>>> provider = AiForceimager()
|
|
8
|
-
>>> images = provider.generate("Cool art")
|
|
9
|
-
>>> paths = provider.save(images)
|
|
10
|
-
>>>
|
|
11
|
-
>>> # Async Usage
|
|
12
|
-
>>> from webscout import AsyncAiForceimager
|
|
13
|
-
>>> async def example():
|
|
14
|
-
... provider = AsyncAiForceimager()
|
|
15
|
-
... images = await provider.generate("Epic dragon")
|
|
16
|
-
... paths = await provider.save(images)
|
|
17
|
-
"""
|
|
18
|
-
|
|
19
|
-
from .sync_aiforce import AiForceimager
|
|
20
|
-
from .async_aiforce import AsyncAiForceimager
|
|
21
|
-
|
|
22
|
-
__all__ = ["AiForceimager", "AsyncAiForceimager"]
|
|
1
|
+
"""
|
|
2
|
+
AiForce - Your go-to provider for generating fire images! 🔥
|
|
3
|
+
|
|
4
|
+
Examples:
|
|
5
|
+
>>> # Sync Usage
|
|
6
|
+
>>> from webscout import AiForceimager
|
|
7
|
+
>>> provider = AiForceimager()
|
|
8
|
+
>>> images = provider.generate("Cool art")
|
|
9
|
+
>>> paths = provider.save(images)
|
|
10
|
+
>>>
|
|
11
|
+
>>> # Async Usage
|
|
12
|
+
>>> from webscout import AsyncAiForceimager
|
|
13
|
+
>>> async def example():
|
|
14
|
+
... provider = AsyncAiForceimager()
|
|
15
|
+
... images = await provider.generate("Epic dragon")
|
|
16
|
+
... paths = await provider.save(images)
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from .sync_aiforce import AiForceimager
|
|
20
|
+
from .async_aiforce import AsyncAiForceimager
|
|
21
|
+
|
|
22
|
+
__all__ = ["AiForceimager", "AsyncAiForceimager"]
|