webscout-4.0-py3-none-any.whl → webscout-4.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +126 -61
- webscout/AIutel.py +28 -18
- webscout/Local/_version.py +1 -1
- webscout/Provider/OLLAMA.py +187 -0
- webscout/Provider/__init__.py +2 -1
- webscout/__init__.py +1 -0
- webscout/version.py +1 -1
- webscout/webai.py +14 -0
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/METADATA +10 -1
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/RECORD +14 -13
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/LICENSE.md +0 -0
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/WHEEL +0 -0
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/entry_points.txt +0 -0
- {webscout-4.0.dist-info → webscout-4.2.dist-info}/top_level.txt +0 -0
webscout/AIauto.py
CHANGED
@@ -1,29 +1,33 @@
 from webscout.AIbase import Provider, AsyncProvider
-from webscout import … [old lines 2-26, truncated]
+from webscout.Provider.ThinkAnyAI import ThinkAnyAI
+from webscout.Provider.Xjai import Xjai
+from webscout.Provider.Llama2 import LLAMA2
+from webscout.Provider.Llama2 import AsyncLLAMA2
+from webscout.Provider.Leo import LEO
+from webscout.Provider.Leo import AsyncLEO
+from webscout.Provider.Koboldai import KOBOLDAI
+from webscout.Provider.Koboldai import AsyncKOBOLDAI
+from webscout.Provider.OpenGPT import OPENGPT
+from webscout.Provider.OpenGPT import OPENGPTv2
+from webscout.Provider.OpenGPT import AsyncOPENGPT
+from webscout.Provider.Perplexity import PERPLEXITY
+from webscout.Provider.Blackboxai import BLACKBOXAI
+from webscout.Provider.Blackboxai import AsyncBLACKBOXAI
+from webscout.Provider.Phind import PhindSearch
+from webscout.Provider.Phind import AsyncPhindSearch
+from webscout.Provider.Phind import Phindv2
+from webscout.Provider.Phind import AsyncPhindv2
+from webscout.Provider.Yepchat import YEPCHAT
+from webscout.Provider.Yepchat import AsyncYEPCHAT
+from webscout.Provider.Berlin4h import Berlin4h
+from webscout.Provider.ChatGPTUK import ChatGPTUK
+from webscout.Provider.Poe import POE
+from webscout.Provider.BasedGPT import BasedGPT
+from webscout.Provider.Deepseek import DeepSeek
+from webscout.Provider.Deepinfra import DeepInfra, VLM, AsyncDeepInfra
+from webscout.Provider.VTLchat import VTLchat
+from webscout.Provider.Geminipro import GEMINIPRO
+from webscout.Provider.Geminiflash import GEMINIFLASH
 from webscout.g4f import GPT4FREE, AsyncGPT4FREE
 from webscout.g4f import TestProviders
 from webscout.exceptions import AllProvidersFailure
@@ -36,43 +40,56 @@ import logging
 
 
 provider_map: dict[
-    str,
-… [old lines 40-58, truncated]
+    str,
+    Union[
+        ThinkAnyAI,
+        Xjai,
+        LLAMA2,
+        LEO,
+        KOBOLDAI,
+        OPENGPT,
+        OPENGPTv2,
+        PERPLEXITY,
+        BLACKBOXAI,
+        PhindSearch,
+        Phindv2,
+        YEPCHAT,
+        Berlin4h,
+        ChatGPTUK,
+        POE,
+        BasedGPT,
+        DeepSeek,
+        DeepInfra,
+        VLM,
+        VTLchat,
+        GEMINIPRO,
+        GEMINIFLASH,
+        GPT4FREE,
+    ],
 ] = {
+    "ThinkAnyAI": ThinkAnyAI,
+    "Xjai": Xjai,
+    "LLAMA2": LLAMA2,
+    "LEO": LEO,
+    "KOBOLDAI": KOBOLDAI,
+    "OPENGPT": OPENGPT,
+    "OPENGPTv2": OPENGPTv2,
+    "PERPLEXITY": PERPLEXITY,
+    "BLACKBOXAI": BLACKBOXAI,
     "PhindSearch": PhindSearch,
-    "… [old lines 61-72, truncated]
-    "chatgptuk": ChatGPTUK,
+    "Phindv2": Phindv2,
+    "YEPCHAT": YEPCHAT,
+    "Berlin4h": Berlin4h,
+    "ChatGPTUK": ChatGPTUK,
+    "POE": POE,
+    "BasedGPT": BasedGPT,
+    "DeepSeek": DeepSeek,
+    "DeepInfra": DeepInfra,
+    "VLM": VLM,
+    "VTLchat": VTLchat,
+    "GEMINIPRO": GEMINIPRO,
+    "GEMINIFLASH": GEMINIFLASH,
     "gpt4free": GPT4FREE,
-… [old line 75, truncated]
 }
 
 
@@ -104,7 +121,31 @@ class AUTO(Provider):
         act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         exclude(list[str], optional): List of providers to be excluded. Defaults to [].
         """
-        self.provider: Union[…
+        self.provider: Union[
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE,
+        ] = None
         self.provider_name: str = None
         self.is_conversation = is_conversation
         self.max_tokens = max_tokens
@@ -315,6 +356,30 @@ class AsyncAUTO(AsyncProvider):
             AsyncLLAMA2,
             AsyncBLACKBOXAI,
             AsyncGPT4FREE,
+            AsyncLEO,
+            ThinkAnyAI,
+            Xjai,
+            LLAMA2,
+            LEO,
+            KOBOLDAI,
+            OPENGPT,
+            OPENGPTv2,
+            PERPLEXITY,
+            BLACKBOXAI,
+            PhindSearch,
+            Phindv2,
+            YEPCHAT,
+            Berlin4h,
+            ChatGPTUK,
+            POE,
+            BasedGPT,
+            DeepSeek,
+            DeepInfra,
+            VLM,
+            VTLchat,
+            GEMINIPRO,
+            GEMINIFLASH,
+            GPT4FREE
         ] = None
         self.provider_name: str = None
         self.is_conversation = is_conversation
@@ -490,4 +555,4 @@ class AsyncAUTO(AsyncProvider):
             str: Message extracted
         """
         assert self.provider is not None, "Chat with AI first"
-        return await self.provider.get_message(response)
+        return await self.provider.get_message(response)
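Beyond the mechanical import rewrite, note that several provider_map keys change casing (e.g. "chatgptuk" becomes "ChatGPTUK"), so string lookups against the old keys will stop matching in 4.2. A minimal sketch of a lookup against the new map, assuming the chosen provider class can be constructed with its defaults (the constructor arguments are not shown in these hunks):

```python
# Hedged sketch: resolving a provider class by name via the expanded
# provider_map from webscout/AIauto.py. Keys now match the class names
# ("ChatGPTUK", not "chatgptuk"), so old lowercase lookups must be updated.
from webscout.AIauto import provider_map

ProviderCls = provider_map["GEMINIFLASH"]
bot = ProviderCls()  # assumes this provider's default constructor arguments suffice
print(bot.chat("Hello"))  # Provider subclasses expose chat()/ask() per webscout.AIbase
```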
webscout/AIutel.py
CHANGED
@@ -52,6 +52,7 @@ webai = [
     "vtlchat",
     "geminiflash",
     "geminipro",
+    "ollama"
 ]
 
 gpt4free_providers = [
@@ -219,19 +220,20 @@ class Conversation:
         ), f"File '{filepath}' does not exist"
         if not os.path.isfile(filepath):
             logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w"…
+            with open(filepath, "w") as fh:  # Try creating new file
                 # lets add intro here
                 fh.write(self.intro)
         else:
             logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath…
-                file_contents = fh.…
-… [old lines 229-230, truncated]
+            with open(filepath) as fh:
+                file_contents = fh.readlines()
+                if file_contents:
+                    self.intro = file_contents[0]  # Presume first line is the intro.
+                    self.chat_history = "\n".join(file_contents[1:])
 
-    def __trim_chat_history(self, chat_history: str) -> str:
+    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
         """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
-        len_of_intro = len(…
+        len_of_intro = len(intro)
         len_of_chat_history = len(chat_history)
         total = (
             self.max_tokens_to_sample + len_of_intro + len_of_chat_history
@@ -239,25 +241,28 @@ class Conversation:
         if total > self.history_offset:
             truncate_at = (total - self.history_offset) + self.prompt_allowance
             # Remove head of total (n) of chat_history
-… [old lines 242-243, truncated]
+            trimmed_chat_history = chat_history[truncate_at:]
+            return "... " + trimmed_chat_history
         # print(len(self.chat_history))
-… [old lines 245-246, truncated]
-        return chat_history
+        else:
+            return chat_history
 
-    def gen_complete_prompt(self, prompt: str) -> str:
+    def gen_complete_prompt(self, prompt: str, intro: str = None) -> str:
         """Generates a kinda like incomplete conversation
 
         Args:
-            prompt (str): …
+            prompt (str): Chat prompt
+            intro (str): Override class' intro. Defaults to None.
 
         Returns:
             str: Updated incomplete chat_history
         """
         if self.status:
-… [old lines 259-260, truncated]
+            intro = self.intro if intro is None else intro
+            incomplete_chat_history = self.chat_history + self.history_format % dict(
+                user=prompt, llm=""
+            )
+            return intro + self.__trim_chat_history(incomplete_chat_history, intro)
 
         return prompt
 
@@ -275,11 +280,16 @@ class Conversation:
             return
         new_history = self.history_format % dict(user=prompt, llm=response)
         if self.file and self.update_file:
-… [old lines 278-279, truncated]
+            if os.path.exists(self.file):
+                with open(self.file, "w") as fh:
+                    fh.write(self.intro + "\n" + new_history)
+            else:
+                with open(self.file, "a") as fh:
+                    fh.write(new_history)
         self.chat_history += new_history
 
 
+
 class AwesomePrompts:
     awesome_prompt_url = (
         "https://raw.githubusercontent.com/OE-LUCIFER/prompts/main/prompt.json"
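The Conversation changes above add a per-call intro override: gen_complete_prompt() now threads an intro argument down into __trim_chat_history(), so trimming is measured against the intro actually in use. A hedged sketch of the new call shape; the positional constructor arguments mirror the Conversation(is_conversation, max_tokens, filepath, update_file) call made by the new OLLAMA provider below, and the first flag is assumed to back the self.status check:

```python
# Hedged sketch of the Conversation API as changed in this diff.
from webscout.AIutel import Conversation

conv = Conversation(True, 600)  # conversational mode on, 600-token budget (assumed order)
conv.intro = "You are a terse assistant."

# Default behaviour: the instance intro is prepended and counted during trimming.
print(conv.gen_complete_prompt("What is UDP?"))

# New in 4.2: a per-call intro overrides conv.intro for this prompt only, and
# __trim_chat_history() now measures this intro rather than a stale one.
print(conv.gen_complete_prompt("What is UDP?", intro="Answer like a pirate."))
```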
webscout/Local/_version.py
CHANGED
webscout/Provider/OLLAMA.py
ADDED
@@ -0,0 +1,187 @@
import time
import uuid
import requests
from requests import get
from uuid import uuid4
from re import findall
from requests.exceptions import RequestException
from curl_cffi.requests import get, RequestsError
import g4f
from random import randint
from PIL import Image
import io
import re
import json
import yaml
from ..AIutel import Optimizers
from ..AIutel import Conversation
from ..AIutel import AwesomePrompts, sanitize_stream
from ..AIbase import Provider, AsyncProvider
from webscout import exceptions
from typing import Any, AsyncGenerator, Dict
import logging
import httpx
import ollama

class OLLAMA(Provider):
    def __init__(
        self,
        model: str = 'qwen2:0.5b',
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ):
        """Instantiates Ollama

        Args:
            model (str, optional): Model name. Defaults to 'llama2'.
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        """
        self.model = model
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict | AsyncGenerator:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict|AsyncGenerator : ai content
        ```json
        {
            "text" : "print('How may I help you today?')"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        def for_stream():
            stream = ollama.chat(model=self.model, messages=[
                {'role': 'user', 'content': conversation_prompt}
            ], stream=True)

            message_load = ""
            for chunk in stream:
                message_load += chunk['message']['content']
                yield chunk['message']['content'] if raw else dict(text=message_load)
            self.last_response.update(dict(text=message_load))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            response = ollama.chat(model=self.model, messages=[
                {'role': 'user', 'content': conversation_prompt}
            ])
            self.last_response.update(dict(text=response['message']['content']))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | AsyncGenerator:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    ollama_provider = OLLAMA(model="qwen2:0.5b")
    response = ollama_provider.chat("What is the meaning of life?")
    print(response)
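One subtlety worth calling out in ask()/chat() above: in streaming mode each yielded dict carries the cumulative text so far (message_load), while raw=True yields the incremental deltas exactly as the ollama client returns them. A hedged sketch of incremental printing, assuming a running local Ollama server with the model already pulled (`ollama pull qwen2:0.5b`):

```python
# Hedged sketch: incremental output from the OLLAMA provider defined above.
from webscout import OLLAMA

bot = OLLAMA(model="qwen2:0.5b")

# raw=True makes ask() yield per-chunk deltas; without it, each yielded
# dict's "text" field holds the cumulative response so far, so printing
# every chunk would repeat earlier text.
for delta in bot.ask("Explain TCP vs UDP in one sentence.", stream=True, raw=True):
    print(delta, end="", flush=True)
print()
```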
webscout/Provider/__init__.py
CHANGED
@@ -37,7 +37,7 @@ from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
 from .VTLchat import VTLchat
 from .Geminipro import GEMINIPRO
 from .Geminiflash import GEMINIFLASH
-
+from .OLLAMA import OLLAMA
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
@@ -78,6 +78,7 @@ __all__ = [
     'OPENGPTv2',
     'GEMINIPRO',
     'GEMINIFLASH',
+    'OLLAMA'
 
 
 ]
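With the export wired into webscout/Provider/__init__.py (and a matching one-line re-export implied by the +1 in webscout/__init__.py plus the `from webscout import OLLAMA` used by webai.py in this same release), both import paths should resolve to the same class, a quick hedged check:

```python
# Hedged sketch: the subpackage-level and package-level imports should now
# point at the same class; the top-level re-export is inferred, not shown.
from webscout.Provider import OLLAMA as ProviderOLLAMA
from webscout import OLLAMA

assert OLLAMA is ProviderOLLAMA
```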
webscout/__init__.py
CHANGED
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "4.0"
+__version__ = "4.2"
 __prog__ = "webscout"
webscout/webai.py
CHANGED
@@ -831,7 +831,21 @@ class Main(cmd.Cmd):
                 act=awesome_prompt,
                 quiet=quiet,
             )
+        elif provider == "ollama":
+            from webscout import OLLAMA
 
+            self.bot = OLLAMA(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+                model=getOr(model, "qwen2:0.5b")
+            )
         else:
             raise NotImplementedError(
                 f"The provider `{provider}` is not yet implemented."
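Outside the CLI, the construction the new elif branch performs can be reproduced directly; the values below are placeholders standing in for webai's parsed options (getOr(model, "qwen2:0.5b") in the hunk simply falls back to the default model when none was given):

```python
# Hedged sketch: roughly what the new `elif provider == "ollama":` branch
# builds, with literal placeholder values where webai.py passes its parsed
# CLI options.
from webscout import OLLAMA

bot = OLLAMA(
    is_conversation=True,      # webai passes its disable_conversation flag here
    max_tokens=600,
    timeout=30,
    history_offset=10250,
    model="qwen2:0.5b",        # mirrors the getOr(model, "qwen2:0.5b") fallback
)
print(bot.chat("Hello from the terminal"))
```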
{webscout-4.0.dist-info → webscout-4.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.0
+Version: 4.2
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1463,6 +1463,15 @@ print(response)
 
 ### 21. GeminiFlash and geminipro
 **Usage similar to other providers**
+
+### 22. `Ollama` - chat with AI models locally
+```python
+from webscout import OLLAMA
+ollama_provider = OLLAMA(model="qwen2:0.5b")
+response = ollama_provider.chat("What is the meaning of life?")
+print(response)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM
{webscout-4.0.dist-info → webscout-4.2.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
-webscout/AIauto.py,sha256=…
+webscout/AIauto.py,sha256=DycblRfFsQiLJVeP1sRQ0C-eNX7iO3a_y1wt8wChM8Y,20005
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
-webscout/AIutel.py,sha256=…
+webscout/AIutel.py,sha256=xNqNnGO9st2aR2CdT4AePXt5yYFG4xgI2nKMo1UcQQ4,33980
 webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
 webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
 webscout/YTdownloader.py,sha256=uWpUWnw9pxeEGw9KJ_3XDyQ5gd38gH1dJpr-HJo4vzU,39144
-webscout/__init__.py,sha256=…
+webscout/__init__.py,sha256=bNfobn_GZVhf8CZVos1dX19xbON5tjsoTGFCeEwetnk,2211
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
 webscout/cli.py,sha256=EDxqTmcIshvhg9P0n2ZPaApj2-MEFY3uawS92zbBV_s,14705
@@ -14,9 +14,9 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
-webscout/version.py,sha256=…
+webscout/version.py,sha256=lOw9hPXXgs_Wlw6Px5eyN37MYJbDYnOuwXrI1TPkDXc,44
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
-webscout/webai.py,sha256=…
+webscout/webai.py,sha256=LPn9XKvc5SLxJ68slMsPUXxzkzfa4b0kzsiJyWs-yq0,88897
 webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
 webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
 webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
@@ -26,7 +26,7 @@ webscout/Extra/gguf.py,sha256=5zTNE5HxM_VQ5ONoocL8GG5fRXrgyLdEEjNzndG0oUw,7811
 webscout/Extra/weather.py,sha256=ocGwJYp5B9FwVWvIZ9wtoJTQsPFt64Vt8TitxJcdvAU,1687
 webscout/Extra/weather_ascii.py,sha256=sy6EEh2kN1CO1hKda8chD-mVCxH4p0NHyP7Uxr0-rgo,630
 webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
-webscout/Local/_version.py,sha256=…
+webscout/Local/_version.py,sha256=ZbCLJLHnrzQdwnxadyRSHEGRQY77fO8BRjE8sVITcnw,83
 webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
 webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
 webscout/Local/rawdog.py,sha256=ojY_O8Vb1KvR34OwWdfLgllgaAK_7HMf64ElMATvCXs,36689
@@ -47,6 +47,7 @@ webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,210…
 webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
 webscout/Provider/Leo.py,sha256=wbuDR-vFjLptfRC6yDlk74tINqNvCOzpISsK92lIgGg,19987
 webscout/Provider/Llama2.py,sha256=gVMotyiBaDSqliwuDtFefHoOBn9V5m5Ze_YVtV0trt8,17525
+webscout/Provider/OLLAMA.py,sha256=G8sz_P7OZINFI1qGnpDhNPWU789Sv2cpDnShOA5Nbmw,7075
 webscout/Provider/OpenGPT.py,sha256=ZymwLgNJSPlGZHW3msMlnRR7NxmALqJw9yuToqrRrhw,35515
 webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
 webscout/Provider/Perplexity.py,sha256=CPdKqkdlVejXDcf1uycNO4LPCVNUADSCetvyJEGepSw,8826
@@ -58,10 +59,10 @@ webscout/Provider/VTLchat.py,sha256=_sErGr-wOi16ZAfiGOo0bPsAEMkjzzwreEsIqjIZMIU,…
 webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
 webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
 webscout/Provider/Youchat.py,sha256=fhMpt94pIPE_XDbC4z9xyfgA7NbkNE2wlRFJabsjv90,8069
-webscout/Provider/__init__.py,sha256=…
-webscout-4.0.dist-info/…
-webscout-4.0.dist-info/…
-webscout-4.0.dist-info/…
-webscout-4.0.dist-info/…
-webscout-4.0.dist-info/…
-webscout-4.0.dist-info/…
+webscout/Provider/__init__.py,sha256=ETLFpBrQsE5yCrrHXSnQtQfB9SF65oBDrylCi0bq5GY,1963
+webscout-4.2.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-4.2.dist-info/METADATA,sha256=op8wqdzv0qTjR3MTqHSKYVOT-8QYOeDtffvjDz0dw_s,57080
+webscout-4.2.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+webscout-4.2.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-4.2.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-4.2.dist-info/RECORD,,
{webscout-4.0.dist-info → webscout-4.2.dist-info}/LICENSE.md
File without changes
{webscout-4.0.dist-info → webscout-4.2.dist-info}/WHEEL
File without changes
{webscout-4.0.dist-info → webscout-4.2.dist-info}/entry_points.txt
File without changes
{webscout-4.0.dist-info → webscout-4.2.dist-info}/top_level.txt
File without changes