webscout-7.3-py3-none-any.whl → webscout-7.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/AISEARCH/__init__.py +4 -3
- webscout/Provider/AISEARCH/genspark_search.py +208 -0
- webscout/Provider/AllenAI.py +282 -0
- webscout/Provider/Deepinfra.py +43 -44
- webscout/Provider/ElectronHub.py +634 -0
- webscout/Provider/Glider.py +7 -41
- webscout/Provider/HeckAI.py +200 -0
- webscout/Provider/Jadve.py +49 -63
- webscout/Provider/PI.py +106 -93
- webscout/Provider/Perplexitylabs.py +395 -0
- webscout/Provider/QwenLM.py +7 -61
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TextPollinationsAI.py +3 -2
- webscout/Provider/TwoAI.py +200 -0
- webscout/Provider/Venice.py +200 -0
- webscout/Provider/WiseCat.py +1 -18
- webscout/Provider/__init__.py +12 -0
- webscout/Provider/akashgpt.py +312 -0
- webscout/Provider/chatglm.py +5 -5
- webscout/Provider/freeaichat.py +251 -221
- webscout/Provider/koala.py +9 -1
- webscout/Provider/yep.py +4 -24
- webscout/version.py +1 -1
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/METADATA +44 -49
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/RECORD +32 -21
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/LICENSE.md +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/WHEEL +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/entry_points.txt +0 -0
- {webscout-7.3.dist-info → webscout-7.4.dist-info}/top_level.txt +0 -0
webscout/Provider/freeaichat.py
CHANGED
@@ -1,221 +1,251 @@
-import requests
-import json
-import time
-from typing import Any, Dict, Optional, Generator, Union
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from webscout import LitAgent
-from webscout.Litlogger import Logger, LogFormat
-
-class FreeAIChat(Provider):
-    """
-    A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
-    """
-
-    AVAILABLE_MODELS = [
-        "mistral-nemo",
-        "mistral-large",
-[removed lines 22-221 are truncated in this diff view and not recoverable]
+import requests
+import json
+import time
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import Logger, LogFormat
+
+class FreeAIChat(Provider):
+    """
+    A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
+    """
+
+    AVAILABLE_MODELS = [
+        "mistral-nemo",
+        "mistral-large",
+        "gemini-2.0-flash",
+        "gemini-1.5-pro",
+        "gemini-1.5-flash",
+        "gemini-2.0-pro-exp-02-05",
+        "deepseek-r1",
+        "deepseek-v3",
+        "Deepseek r1 14B",
+        "Deepseek r1 32B",
+        "o3-mini-high",
+        "o3-mini-medium",
+        "o3-mini-low",
+        "o3-mini",
+        "GPT-4o-mini",
+        "o1",
+        "o1-mini",
+        "GPT-4o",
+        "Qwen coder",
+        "Qwen 2.5 72B",
+        "Llama 3.1 405B",
+        "llama3.1-70b-fast",
+        "Llama 3.3 70B",
+        "claude 3.5 haiku",
+        "claude 3.5 sonnet",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "GPT-4o",
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """Initializes the FreeAIChat API client with logging support."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://freeaichatplayground.com/api/v1/chat/completions"
+        self.headers = {
+            'User-Agent': LitAgent().random(),
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://freeaichatplayground.com',
+            'Referer': 'https://freeaichatplayground.com/',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        self.logger = Logger(
+            name="FreeAIChat",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {
+                "role": "system",
+                "content": self.system_prompt
+            },
+            {
+                "role": "user",
+                "content": conversation_prompt
+            }
+        ]
+
+        payload = {
+            "model": self.model,
+            "messages": messages
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to FreeAIChat API...")
+            try:
+                with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = dict(text=content)
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    pass
+
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
+
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_text = ""
+            for chunk in for_stream():
+                full_text += chunk["text"]
+            return {"text": full_text}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+    @staticmethod
+    def fix_encoding(text):
+        if isinstance(text, dict) and "text" in text:
+            try:
+                text["text"] = text["text"].encode("latin1").decode("utf-8")
+                return text
+            except (UnicodeError, AttributeError) as e:
+                return text
+        elif isinstance(text, str):
+            try:
+                return text.encode("latin1").decode("utf-8")
+            except (UnicodeError, AttributeError) as e:
+                return text
+        return text
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = FreeAIChat(model="GPT-4o", logging=True)
+    # response = ai.chat(input(">>>"), stream=True)
+    # full_text = ""
+
+    # for chunk in response:
+    #     corrected_chunk = ai.fix_encoding(chunk)
+    #     full_text += corrected_chunk
+
+    response = ai.chat(input(">>>"), stream=False)
+    response = ai.fix_encoding(response)
+    print(response)
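The rewritten provider keeps the standard Provider surface (ask/chat/get_message) and adds a fix_encoding helper that repairs mojibake by round-tripping latin1 to UTF-8. A minimal streaming sketch against the class above; the import path is inferred from the file layout, and network access to freeaichatplayground.com is assumed:

    from webscout.Provider.freeaichat import FreeAIChat  # import path inferred from file layout

    ai = FreeAIChat(model="GPT-4o")  # model must be one of AVAILABLE_MODELS
    for chunk in ai.chat("Hello!", stream=True):  # chat(stream=True) yields text fragments
        print(ai.fix_encoding(chunk), end="", flush=True)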
webscout/Provider/koala.py
CHANGED
@@ -12,6 +12,11 @@ class KOALA(Provider):
     A class to interact with the Koala.sh API.
     """
 
+    AVAILABLE_MODELS = [
+        "gpt-4o-mini",
+        "gpt-4o",
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -23,7 +28,7 @@ class KOALA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o-mini",
+        model: str = "gpt-4o",
         web_search: bool = True,
 
     ) -> None:
@@ -44,6 +49,9 @@ class KOALA(Provider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
         """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
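With the AVAILABLE_MODELS list and the constructor check added above, an unsupported model name now raises ValueError when KOALA is instantiated. A hedged sketch of that behavior; the import path is inferred from the file layout and the rejected name is hypothetical:

    from webscout.Provider.koala import KOALA  # import path inferred from file layout

    KOALA(model="gpt-4o")  # accepted: listed in AVAILABLE_MODELS
    try:
        KOALA(model="gpt-3.5-turbo")  # hypothetical name outside the list
    except ValueError as err:
        print(err)  # Invalid model: gpt-3.5-turbo. Choose from: ['gpt-4o-mini', 'gpt-4o']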
webscout/Provider/yep.py
CHANGED
@@ -12,7 +12,6 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import WEBS, exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 
 
@@ -39,14 +38,13 @@ class YEPCHAT(Provider):
         act: str = None,
         model: str = "DeepSeek-R1-Distill-Qwen-32B",
         temperature: float = 0.6,
-        top_p: float = 0.7,
-        logging: bool = False,
+        top_p: float = 0.7
     ):
         """
         Initializes the YEPCHAT provider with the specified parameters.
 
         Examples:
-        >>> ai = YEPCHAT(
+        >>> ai = YEPCHAT()
         >>> ai.ask("What's the weather today?")
         Sends a prompt to the Yep API and returns the response.
 
@@ -85,7 +83,7 @@ class YEPCHAT(Provider):
             "Sec-CH-UA-Platform": '"Windows"',
             "User-Agent": self.agent.random(),  # Use LitAgent to generate a random user agent
         }
-        self.cookies = {"__Host-session": uuid.uuid4().hex}
+        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
 
         self.__available_optimizers = (
             method
@@ -106,9 +104,6 @@ class YEPCHAT(Provider):
 
         self.knowledge_cutoff = "December 2023"
 
-        # Initialize logger
-        self.logger = Logger(name="YEPCHAT", format=LogFormat.MODERN_EMOJI) if logging else None
-
     def ask(
         self,
         prompt: str,
@@ -128,9 +123,6 @@ class YEPCHAT(Provider):
         >>> ai.ask("Tell me a joke", stream=True)
         Streams the response from the Yep API.
         """
-        if self.logger:
-            self.logger.debug(f"ask() called with prompt: {prompt}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -138,8 +130,6 @@ class YEPCHAT(Provider):
                     conversation_prompt if conversationally else prompt
                 )
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer: {optimizer}")
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
@@ -157,8 +147,6 @@ class YEPCHAT(Provider):
             try:
                 with self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout) as response:
                     if not response.ok:
-                        if self.logger:
-                            self.logger.error(f"Failed to generate response: {response.status_code} {response.reason}")
                         raise exceptions.FailedToGenerateResponseError(
                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                         )
@@ -183,13 +171,9 @@ class YEPCHAT(Provider):
                             resp = dict(text=content)
                             yield resp if raw else resp
                         except json.JSONDecodeError:
-                            if self.logger:
-                                self.logger.warning("JSONDecodeError encountered.")
                             pass
                 self.conversation.update_chat_history(prompt, streaming_text)
             except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Request failed: {e}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
@@ -217,9 +201,6 @@ class YEPCHAT(Provider):
         >>> ai.chat("What's the weather today?", stream=True)
         Streams the chat response from the Yep API.
         """
-        if self.logger:
-            self.logger.debug(f"chat() called with prompt: {prompt}")
-
        def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -255,8 +236,7 @@ class YEPCHAT(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = YEPCHAT(
-
+    ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B")
     response = ai.chat("how many r in 'strawberry'", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
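Because the logging parameter is gone from the signature, any caller still passing logging=... to YEPCHAT now gets a TypeError; construction is otherwise unchanged. A minimal sketch of the 7.4 call, mirroring the module's own __main__ block (import path inferred from the file layout):

    from webscout.Provider.yep import YEPCHAT  # import path inferred from file layout

    ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B")  # logging kwarg removed in 7.4
    for chunk in ai.chat("how many r in 'strawberry'", stream=True):
        print(chunk, end="", flush=True)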
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "7.3"
+__version__ = "7.4"
 __prog__ = "webscout"
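After installing the new wheel, the bump can be verified from the package metadata alone, without importing webscout itself; a standard-library sketch:

    import importlib.metadata

    # Prints "7.4" once the 7.4 wheel is installed.
    print(importlib.metadata.version("webscout"))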