webscout 7.5-py3-none-any.whl → 7.7-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +193 -199
- webscout/Extra/autocoder/rawdog.py +789 -677
- webscout/Extra/gguf.py +682 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -22
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +28 -30
- webscout/Provider/C4ai.py +29 -11
- webscout/Provider/ChatGPTClone.py +226 -0
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/DeepSeek.py +25 -17
- webscout/Provider/Deepinfra.py +115 -48
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Glider.py +33 -12
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +23 -7
- webscout/Provider/Hunyuan.py +272 -0
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/LambdaChat.py +391 -0
- webscout/Provider/Netwrck.py +42 -19
- webscout/Provider/OLLAMA.py +256 -32
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
- webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
- webscout/Provider/TTI/artbit/async_artbit.py +3 -32
- webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/piclumen/__init__.py +22 -22
- webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +25 -8
- webscout/Provider/WebSim.py +227 -0
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +64 -37
- webscout/Provider/__init__.py +12 -7
- webscout/Provider/akashgpt.py +20 -5
- webscout/Provider/flowith.py +33 -7
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/koala.py +20 -5
- webscout/Provider/labyrinth.py +239 -0
- webscout/Provider/learnfastai.py +28 -15
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +30 -8
- webscout/Provider/multichat.py +65 -9
- webscout/Provider/sonus.py +208 -0
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +155 -65
- webscout/Provider/uncovr.py +297 -0
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +53 -40
- webscout/conversation.py +1 -10
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +356 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +1 -3
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
- webscout/Extra/autollama.py +0 -231
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
- {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OLLAMA.py
CHANGED

@@ -3,8 +3,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
 import ollama
+from ollama import AsyncClient, Client, ResponseError
+import asyncio
+import base64
+from pathlib import Path
 
 class OLLAMA(Provider):
     def __init__(
@@ -19,7 +23,9 @@ class OLLAMA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        host: str = 'http://localhost:11434',
+        headers: Optional[Dict] = None,
     ):
         """Instantiates Ollama
 
@@ -34,7 +40,9 @@ class OLLAMA(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            host (str, optional): Ollama host URL. Defaults to 'http://localhost:11434'.
+            headers (dict, optional): Custom headers for requests. Defaults to None.
         """
         self.model = model
         self.is_conversation = is_conversation
@@ -42,6 +50,8 @@ class OLLAMA(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+        self.client = Client(host=host, headers=headers)
+        self.async_client = AsyncClient(host=host, headers=headers)
 
         self.__available_optimizers = (
             method
@@ -67,6 +77,8 @@ class OLLAMA(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
     ) -> dict | AsyncGenerator:
         """Chat with AI
 
@@ -76,13 +88,10 @@ class OLLAMA(Provider):
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+            images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
         Returns:
             dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -95,29 +104,101 @@ class OLLAMA(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-
-
-
-
-                {'role': 'user', 'content': conversation_prompt}
-            ], stream=True)
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
 
-
-
-            yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+        if images:
+            messages[-1]['images'] = images
 
-
-
-
-
-
-
-
-
-
-
+        try:
+            def for_stream():
+                stream = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
 
-
+            def for_non_stream():
+                response = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def aask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> dict | AsyncGenerator:
+        """Async version of ask method"""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
+
+        if images:
+            messages[-1]['images'] = images
+
+        try:
+            async def for_stream():
+                stream = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                async for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+
+            async def for_non_stream():
+                response = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
 
     def chat(
         self,
@@ -125,6 +206,8 @@ class OLLAMA(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
     ) -> str | AsyncGenerator:
         """Generate response `str`
         Args:
@@ -132,13 +215,15 @@ class OLLAMA(Provider):
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+            images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
         Returns:
             str: Response generated
         """
-
        def for_stream():
            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
            ):
                yield self.get_message(response)
 
@@ -149,6 +234,39 @@ class OLLAMA(Provider):
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
+                    tools=tools,
+                    images=images
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    async def achat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> str | AsyncGenerator:
+        """Async version of chat method"""
+        async def for_stream():
+            async for response in await self.aask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
+            ):
+                yield self.get_message(response)
+
+        async def for_non_stream():
+            return self.get_message(
+                await self.aask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    tools=tools,
+                    images=images
                )
            )
 
@@ -165,8 +283,114 @@ class OLLAMA(Provider):
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
+
+    def generate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> dict | AsyncGenerator:
+        """Generate text using the model"""
+        try:
+            if stream:
+                return self.client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return self.client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def agenerate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> dict | AsyncGenerator:
+        """Async version of generate method"""
+        try:
+            if stream:
+                return await self.async_client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return await self.async_client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    def list_models(self) -> List[dict]:
+        """List all available models"""
+        return self.client.list()
+
+    def show_model(self, model: str = None) -> dict:
+        """Show model details"""
+        model = model or self.model
+        return self.client.show(model)
+
+    def pull_model(self, model: str = None) -> None:
+        """Pull a model from Ollama"""
+        model = model or self.model
+        self.client.pull(model)
+
+    def delete_model(self, model: str = None) -> None:
+        """Delete a model"""
+        model = model or self.model
+        self.client.delete(model)
+
+    def embed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Generate embeddings for input text"""
+        model = model or self.model
+        return self.client.embed(model=model, input=input)
+
+    async def aembed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Async version of embed method"""
+        model = model or self.model
+        return await self.async_client.embed(model=model, input=input)
+
 if __name__ == "__main__":
-
+    # Example usage
+    ai = OLLAMA(model="qwen2.5:0.5b")
+    # ai.pull_model("qwen2.5:0.5b")
+    # Basic chat
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
+
+    # Vision example
+    # response = ai.chat(
+    #     "What's in this image?",
+    #     images=["path/to/image.jpg"]
+    # )
+    # print(response)
+
+    # Tools example
+    def add_numbers(a: int, b: int) -> int:
+        return a + b
+
+    tools = [{
+        'type': 'function',
+        'function': {
+            'name': 'add_numbers',
+            'description': 'Add two numbers',
+            'parameters': {
+                'type': 'object',
+                'properties': {
+                    'a': {'type': 'integer'},
+                    'b': {'type': 'integer'}
+                },
+                'required': ['a', 'b']
+            }
+        }
+    }]
+
+    response = ai.chat(
+        "What is 5 plus 3?",
+        tools=tools
+    )
+    print(response)
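
Taken together, the OLLAMA changes add a configurable client (host/headers forwarded to ollama's Client/AsyncClient), tool and image support, an async surface (aask/achat/agenerate/aembed), and model-management helpers. Below is a minimal usage sketch, not taken from the package, assuming a running Ollama server with the model already pulled; the import path is inferred from the module path above.

import asyncio
from webscout.Provider.OLLAMA import OLLAMA

# New in 7.7: host (and optionally headers) are passed through to the ollama clients.
ai = OLLAMA(model="qwen2.5:0.5b", host="http://localhost:11434")

# Synchronous streaming chat, matching the module's __main__ example.
for chunk in ai.chat("write a haiku about diffs", stream=True):
    print(chunk, end="", flush=True)

# The new async path mirrors the sync one: achat wraps aask and, when
# stream=True, returns an async generator once awaited.
async def main() -> None:
    async for chunk in await ai.achat("say hello", stream=True):
        print(chunk, end="", flush=True)

asyncio.run(main())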
webscout/Provider/PI.py
CHANGED

@@ -18,8 +18,9 @@ class PiAI(Provider):
     Attributes:
         knowledge_cutoff (str): The knowledge cutoff date for the model
         AVAILABLE_VOICES (Dict[str, int]): Available voice options for audio responses
+        AVAILABLE_MODELS (List[str]): Available model options for the API
     """
-
+    AVAILABLE_MODELS = ["inflection_3_pi"]
     AVAILABLE_VOICES: Dict[str, int] = {
         "voice1": 1,
         "voice2": 2,
@@ -44,7 +45,8 @@ class PiAI(Provider):
         act: str = None,
         voice: bool = False,
         voice_name: str = "voice3",
-        output_file: str = "PiAI.mp3"
+        output_file: str = "PiAI.mp3",
+        model: str = "inflection_3_pi",
     ):
         """
         Initializes PiAI with voice support.
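
The PiAI change is small: the class now advertises its models through AVAILABLE_MODELS and accepts a model keyword. A sketch of the resulting call, assuming the rest of the constructor is unchanged from 7.5 (import path inferred from the file path):

from webscout.Provider.PI import PiAI

# "inflection_3_pi" is the only entry in AVAILABLE_MODELS as of 7.7.
ai = PiAI(model="inflection_3_pi", voice=False)
print(ai.chat("hi"))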
webscout/Provider/Perplexitylabs.py
CHANGED

@@ -386,10 +386,30 @@ class PerplexityLabs(Provider):
 
 
 if __name__ == "__main__":
-
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
 
-    #
-
-
-
-
+    # Test all available models
+    working = 0
+    total = len(PerplexityLabs.AVAILABLE_MODELS)
+
+    for model in PerplexityLabs.AVAILABLE_MODELS:
+        try:
+            test_ai = PerplexityLabs(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/PizzaGPT.py
CHANGED

@@ -6,14 +6,14 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
-from webscout.Litlogger import Logger, LogFormat
 
 class PIZZAGPT(Provider):
     """
     PIZZAGPT is a provider class for interacting with the PizzaGPT API.
     Supports web search integration and handles responses using regex.
     """
-
+    AVAILABLE_MODELS = ["gpt-4o-mini"]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -25,10 +25,12 @@ class PIZZAGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        logging: bool = False,
         model: str = "gpt-4o-mini"
     ) -> None:
         """Initialize PizzaGPT with enhanced configuration options."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -45,7 +47,9 @@ class PIZZAGPT(Provider):
             "origin": "https://www.pizzagpt.it",
             "referer": "https://www.pizzagpt.it/en",
             "user-agent": Lit().random(),
-            "x-secret": "Marinara"
+            "x-secret": "Marinara",
+            "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
+            "sec-ch-ua-platform": '"Windows"'
         }
 
         self.__available_optimizers = (
@@ -67,28 +71,15 @@ class PIZZAGPT(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
-
-        self.logger = Logger(
-            name="PIZZAGPT",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"PIZZAGPT initialized with model: {self.model}")
 
     def _extract_content(self, text: str) -> Dict[str, Any]:
         """
         Extract content from response text using regex.
         """
-        if self.logger:
-            self.logger.debug("Extracting content from response text")
-
         try:
             # Look for content pattern
             content_match = re.search(r'"content"\s*:\s*"(.*?)"(?=\s*[,}])', text, re.DOTALL)
             if not content_match:
-                if self.logger:
-                    self.logger.error("Content pattern not found in response")
                 raise exceptions.FailedToGenerateResponseError("Content not found in response")
 
             content = content_match.group(1)
@@ -108,8 +99,6 @@ class PIZZAGPT(Provider):
             }
 
         except Exception as e:
-            if self.logger:
-                self.logger.error(f"Failed to extract content: {str(e)}")
             raise exceptions.FailedToGenerateResponseError(f"Failed to extract content: {str(e)}")
 
     def ask(
@@ -124,21 +113,13 @@ class PIZZAGPT(Provider):
         """
         Send a prompt to PizzaGPT API with optional web search capability.
         """
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Web search enabled: {web_search}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
@@ -147,9 +128,6 @@ class PIZZAGPT(Provider):
             "searchEnabled": web_search
         }
 
-        if self.logger:
-            self.logger.debug(f"Sending payload: {json.dumps(payload, indent=2)}")
-
         try:
             response = self.session.post(
                 self.api_endpoint,
@@ -157,26 +135,17 @@ class PIZZAGPT(Provider):
                 timeout=self.timeout
             )
 
-            if self.logger:
-                self.logger.debug(f"Response status: {response.status_code}")
-
             if not response.ok:
-                if self.logger:
-                    self.logger.error(f"API request failed: {response.status_code} - {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
 
             response_text = response.text
             if not response_text:
-                if self.logger:
-                    self.logger.error("Empty response received from API")
                 raise exceptions.FailedToGenerateResponseError("Empty response received from API")
 
             try:
                 resp = self._extract_content(response_text)
-                if self.logger:
-                    self.logger.debug("Response parsed successfully")
 
                 self.last_response.update(dict(text=resp['content']))
                 self.conversation.update_chat_history(
@@ -185,14 +154,9 @@ class PIZZAGPT(Provider):
                 return self.last_response
 
             except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Failed to parse response: {str(e)}")
-                    self.logger.debug(f"Raw response text: {response_text[:500]}")
                 raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
 
         except requests.exceptions.RequestException as e:
-            if self.logger:
-                self.logger.error(f"Request failed: {str(e)}")
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
 
     def chat(
@@ -206,9 +170,6 @@ class PIZZAGPT(Provider):
         """
         Chat with PizzaGPT with optional web search capability.
         """
-        if self.logger:
-            self.logger.debug(f"Chat request initiated with web_search={web_search}")
-
         try:
             response = self.ask(
                 prompt,
@@ -218,8 +179,6 @@ class PIZZAGPT(Provider):
             )
             return self.get_message(response)
         except Exception as e:
-            if self.logger:
-                self.logger.error(f"Chat failed: {str(e)}")
             raise
 
     def get_message(self, response: dict) -> str:
@@ -231,9 +190,9 @@ if __name__ == "__main__":
     from rich import print
 
     # Example usage with web search enabled
-    ai = PIZZAGPT(
+    ai = PIZZAGPT()
     try:
-        response = ai.chat("
+        response = ai.chat("hi")
         print(response)
     except Exception as e:
         print(f"Error: {str(e)}")