webscout 1.2.4__tar.gz → 1.2.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-1.2.4 → webscout-1.2.5}/PKG-INFO +15 -29
- {webscout-1.2.4 → webscout-1.2.5}/README.md +14 -27
- {webscout-1.2.4 → webscout-1.2.5}/setup.py +1 -2
- {webscout-1.2.4 → webscout-1.2.5}/webscout/AI.py +204 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/AIutel.py +1 -1
- {webscout-1.2.4 → webscout-1.2.5}/webscout/__init__.py +3 -2
- webscout-1.2.5/webscout/version.py +2 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/PKG-INFO +15 -29
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/SOURCES.txt +0 -1
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/requires.txt +0 -1
- webscout-1.2.4/webscout/offlineAI.py +0 -206
- webscout-1.2.4/webscout/version.py +0 -2
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/__init__.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/LICENSE.md +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/setup.cfg +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/AIbase.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/DWEBS.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/HelpingAI.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/LLM.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/__main__.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/cli.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/exceptions.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/models.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/utils.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/webscout_search.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout/webscout_search_async.py +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-1.2.4 → webscout-1.2.5}/webscout.egg-info/top_level.txt +0 -0
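The file list above comes from comparing the two sdist archives member by member. As a minimal sketch of how the same added/removed breakdown could be reproduced locally (the tarball paths are illustrative assumptions, not files shipped with this diff):

```python
import tarfile

def members(path):
    """Return archive member names with the top-level 'name-version/' prefix stripped."""
    with tarfile.open(path, "r:gz") as tar:
        return {
            m.name.split("/", 1)[1]
            for m in tar.getmembers()
            if m.isfile() and "/" in m.name
        }

old = members("webscout-1.2.4.tar.gz")  # illustrative local path
new = members("webscout-1.2.5.tar.gz")  # illustrative local path

print("added:  ", sorted(new - old))    # e.g. webscout/version.py
print("removed:", sorted(old - new))    # e.g. webscout/offlineAI.py
print("common: ", len(old & new))
```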
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.2.4
+Version: 1.2.5
 Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models
 Author: OEvortex
 Author-email: helpingai5@gmail.com

@@ -45,7 +45,6 @@ Requires-Dist: sse_starlette
 Requires-Dist: termcolor
 Requires-Dist: tiktoken
 Requires-Dist: tldextract
-Requires-Dist: gpt4all
 Requires-Dist: orjson
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"

@@ -94,7 +93,7 @@ Also containes AI models that you can use
 - [6. `BlackBox` - Search/chat With BlackBox](#6-blackbox---searchchat-with-blackbox)
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
-- [9. `
+- [9. `KOBOLDIA` -](#9-koboldia--)
 - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
 - [`LLM`](#llm)
 

@@ -608,36 +607,23 @@ prompt = "tell me about india"
 response_str = opengpt.chat(prompt)
 print(response_str)
 ```
-### 9. `
+### 9. `KOBOLDIA` -
 ```python
-from webscout import 
+from webscout.AI import KOBOLDAI
 
-# 
-
-    model="path/to/your/model/file",  # Replace with the actual path to your model file
-    is_conversation=True,
-    max_tokens=800,
-    temperature=0.7,
-    presence_penalty=0,
-    frequency_penalty=1.18,
-    top_p=0.4,
-    intro="Hello, how can I assist you today?",
-    filepath="path/to/conversation/history/file",  # Optional, for conversation history
-    update_file=True,
-    history_offset=10250,
-    act=None  # Optional, for using an awesome prompt as intro
-)
+# Instantiate the KOBOLDAI class with default parameters
+koboldai = KOBOLDAI()
 
-# 
-
-
-
-
-
-
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'ask' method to get a response from the AI
+response = koboldai.ask(prompt)
+
+# Extract and print the message from the response
+message = koboldai.get_message(response)
+print(message)
 
-# Print the generated response
-print(response)
 ```
 
 ## usage of special .LLM file from webscout (webscout.LLM)
@@ -41,7 +41,7 @@ Also containes AI models that you can use
 - [6. `BlackBox` - Search/chat With BlackBox](#6-blackbox---searchchat-with-blackbox)
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
-- [9. `
+- [9. `KOBOLDIA` -](#9-koboldia--)
 - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
 - [`LLM`](#llm)
 

@@ -555,36 +555,23 @@ prompt = "tell me about india"
 response_str = opengpt.chat(prompt)
 print(response_str)
 ```
-### 9. `
+### 9. `KOBOLDIA` -
 ```python
-from webscout import 
+from webscout.AI import KOBOLDAI
 
-# 
-
-    model="path/to/your/model/file",  # Replace with the actual path to your model file
-    is_conversation=True,
-    max_tokens=800,
-    temperature=0.7,
-    presence_penalty=0,
-    frequency_penalty=1.18,
-    top_p=0.4,
-    intro="Hello, how can I assist you today?",
-    filepath="path/to/conversation/history/file",  # Optional, for conversation history
-    update_file=True,
-    history_offset=10250,
-    act=None  # Optional, for using an awesome prompt as intro
-)
+# Instantiate the KOBOLDAI class with default parameters
+koboldai = KOBOLDAI()
 
-# 
-
-
-
-
-
-
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'ask' method to get a response from the AI
+response = koboldai.ask(prompt)
+
+# Extract and print the message from the response
+message = koboldai.get_message(response)
+print(message)
 
-# Print the generated response
-print(response)
 ```
 
 ## usage of special .LLM file from webscout (webscout.LLM)
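The new README section instantiates KOBOLDAI and then calls ask() followed by get_message(). Based on the class added to webscout/AI.py later in this diff, the chat() helper wraps those two steps, so the example could be written more compactly; a minimal sketch (the prompt string is only an illustration):

```python
from webscout.AI import KOBOLDAI

koboldai = KOBOLDAI()

# chat() calls ask() and extracts the message via get_message() internally
print(koboldai.chat("What is the capital of France?"))
```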
@@ -9,7 +9,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="1.2.4",
+    version="1.2.5",
     description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models",
     long_description=README,
     long_description_content_type="text/markdown",

@@ -53,7 +53,6 @@ setup(
         "termcolor",
         "tiktoken",
         "tldextract",
-        "gpt4all",
         "orjson",
     ],
     entry_points={
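Both setup.py and the PKG-INFO metadata drop gpt4all from the requirements, so installing 1.2.5 should no longer pull it in. A hedged way to confirm this against an installed copy (assuming the distribution name on the index is simply `webscout`):

```python
from importlib.metadata import requires, version

print(version("webscout"))  # expected to print 1.2.5 after upgrading

# requires() returns the declared dependency strings (or None)
deps = requires("webscout") or []
print(any(d.startswith("gpt4all") for d in deps))  # expected: False on 1.2.5
```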
@@ -21,10 +21,200 @@ import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 from Helpingai_T2 import Perplexity
 from typing import Any
 import logging
 #------------------------------------------------------OpenGPT-----------------------------------------------------------
+class KOBOLDAI(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 1,
+        top_p: float = 1,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiate TGPT
+
+        Args:
+            is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
+            timeout (int, optional): Http requesting timeout. Defaults to 30
+            intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies (socks). Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.temperature = temperature
+        self.top_p = top_p
+        self.chat_endpoint = (
+            "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
+        )
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "token" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "prompt": conversation_prompt,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                delimiter="" if raw else "event: message\ndata:",
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    resp = json.loads(value)
+                    message_load += self.get_message(resp)
+                    resp["token"] = message_load
+                    self.last_response.update(resp)
+                    yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("token")
+#------------------------------------------------------OpenGPT-----------------------------------------------------------
 class OPENGPT:
     def __init__(
         self,

@@ -1237,5 +1427,19 @@ def opengpt(prompt, stream):
     else:
         response_str = opengpt.chat(prompt)
         print(response_str)
+
+@cli.command()
+@click.option('--prompt', prompt='Enter your prompt', help='The prompt to send.')
+@click.option('--stream', is_flag=True, help='Flag for streaming response.')
+@click.option('--raw', is_flag=True, help='Stream back raw response as received.')
+@click.option('--optimizer', type=str, help='Prompt optimizer name.')
+@click.option('--conversationally', is_flag=True, help='Chat conversationally when using optimizer.')
+def koboldai_cli(prompt, stream, raw, optimizer, conversationally):
+    """Chat with KOBOLDAI using the provided prompt."""
+    koboldai_instance = KOBOLDAI()  # Initialize a KOBOLDAI instance
+    response = koboldai_instance.ask(prompt, stream, raw, optimizer, conversationally)
+    processed_response = koboldai_instance.get_message(response)  # Process the response
+    print(processed_response)
+
 if __name__ == '__main__':
     cli()
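The new provider posts to a hosted KoboldCpp streaming endpoint, so ask() and chat() also accept stream=True and yield partial results as they arrive. A small sketch of streaming with the class exactly as added above (the prompt and the explicit timeout are illustrative):

```python
from webscout.AI import KOBOLDAI

koboldai = KOBOLDAI(timeout=60)  # constructor defaults are shown in the diff; timeout is optional

# chat(stream=True) yields the message accumulated so far on each event,
# so each yielded string is progressively longer.
for text in koboldai.chat("Write one sentence about Python.", stream=True):
    print(text, end="\r")
print()
```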
@@ -1,7 +1,7 @@
 """Webscout.
 
 Search for words, documents, images, videos, news, maps and text translation
-using the DuckDuckGo.com
+using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models
 """
 
 import logging

@@ -9,7 +9,8 @@ from .webscout_search import WEBS
 from .webscout_search_async import AsyncWEBS
 from .version import __version__
 from .DWEBS import DeepWEBS
-from .
+from .AIutel import appdir
+
 __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]
 
 logging.getLogger("webscout").addHandler(logging.NullHandler())
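The module docstring now advertises multiple search backends while the public exports stay the same (WEBS, AsyncWEBS, __version__, cli), with __version__ re-exported from the new webscout/version.py. Assuming WEBS keeps a duckduckgo_search-style text() search API (an assumption; the search interface itself is not part of this diff), top-level usage looks roughly like:

```python
from webscout import WEBS, __version__

print(__version__)  # comes from webscout/version.py via the import above

# Assumed interface: WEBS usable as a context manager with a text() search method
with WEBS() as webs:
    for result in webs.text("free and open source search", max_results=5):
        print(result)
```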
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.2.4
+Version: 1.2.5
 Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models
 Author: OEvortex
 Author-email: helpingai5@gmail.com

@@ -45,7 +45,6 @@ Requires-Dist: sse_starlette
 Requires-Dist: termcolor
 Requires-Dist: tiktoken
 Requires-Dist: tldextract
-Requires-Dist: gpt4all
 Requires-Dist: orjson
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"

@@ -94,7 +93,7 @@ Also containes AI models that you can use
 - [6. `BlackBox` - Search/chat With BlackBox](#6-blackbox---searchchat-with-blackbox)
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
-- [9. `
+- [9. `KOBOLDIA` -](#9-koboldia--)
 - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
 - [`LLM`](#llm)
 

@@ -608,36 +607,23 @@ prompt = "tell me about india"
 response_str = opengpt.chat(prompt)
 print(response_str)
 ```
-### 9. `
+### 9. `KOBOLDIA` -
 ```python
-from webscout import 
+from webscout.AI import KOBOLDAI
 
-# 
-
-    model="path/to/your/model/file",  # Replace with the actual path to your model file
-    is_conversation=True,
-    max_tokens=800,
-    temperature=0.7,
-    presence_penalty=0,
-    frequency_penalty=1.18,
-    top_p=0.4,
-    intro="Hello, how can I assist you today?",
-    filepath="path/to/conversation/history/file",  # Optional, for conversation history
-    update_file=True,
-    history_offset=10250,
-    act=None  # Optional, for using an awesome prompt as intro
-)
+# Instantiate the KOBOLDAI class with default parameters
+koboldai = KOBOLDAI()
 
-# 
-
-
-
-
-
-
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'ask' method to get a response from the AI
+response = koboldai.ask(prompt)
+
+# Extract and print the message from the response
+message = koboldai.get_message(response)
+print(message)
 
-# Print the generated response
-print(response)
 ```
 
 ## usage of special .LLM file from webscout (webscout.LLM)
@@ -1,206 +0,0 @@
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from gpt4all import GPT4All
-from gpt4all.gpt4all import empty_chat_session
-from gpt4all.gpt4all import append_extension_if_missing
-
-
-import logging
-
-my_logger = logging.getLogger("gpt4all")
-my_logger.setLevel(logging.CRITICAL)
-
-
-class GPT4ALL(Provider):
-    def __init__(
-        self,
-        model: str,
-        is_conversation: bool = True,
-        max_tokens: int = 800,
-        temperature: float = 0.7,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 1.18,
-        top_p: float = 0.4,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Instantiates GPT4ALL
-
-        Args:
-            model (str, optional): Path to LLM model (.gguf or .bin).
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.7.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 1.18.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.4.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.last_response = {}
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-        def get_model_name_path():
-            import os
-            from pathlib import Path
-
-            initial_model_path = Path(append_extension_if_missing(model))
-            if initial_model_path.exists:
-                if not initial_model_path.is_absolute():
-                    initial_model_path = Path(os.getcwd()) / initial_model_path
-                return os.path.split(initial_model_path.as_posix())
-            else:
-                raise FileNotFoundError(
-                    "File does not exist " + initial_model_path.as_posix()
-                )
-
-        model_dir, model_name = get_model_name_path()
-
-        self.gpt4all = GPT4All(
-            model_name=model_name,
-            model_path=model_dir,
-            allow_download=False,
-            verbose=False,
-        )
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I help you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        def for_stream():
-            response = self.gpt4all.generate(
-                prompt=conversation_prompt,
-                max_tokens=self.max_tokens_to_sample,
-                temp=self.temperature,
-                top_p=self.top_p,
-                repeat_penalty=self.frequency_penalty,
-                streaming=True,
-            )
-
-            message_load: str = ""
-            for token in response:
-                message_load += token
-                resp: dict = dict(text=message_load)
-                yield token if raw else resp
-                self.last_response.update(resp)
-
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            self.gpt4all.current_chat_session = empty_chat_session()
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (str): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
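The removed offlineAI.py wrapped the gpt4all package behind the same Provider interface; with it gone (and gpt4all dropped from the requirements), local .gguf/.bin models are no longer reachable through webscout 1.2.5. A hedged sketch of calling gpt4all directly, mirroring what the deleted GPT4ALL provider did internally (the model directory and filename are placeholders, and gpt4all now has to be installed separately):

```python
from gpt4all import GPT4All  # no longer a webscout dependency as of 1.2.5

# The deleted provider split a local model path into directory + filename
model_dir, model_name = "path/to/your/model/dir", "model.gguf"  # placeholders
llm = GPT4All(model_name=model_name, model_path=model_dir, allow_download=False, verbose=False)

# Same generate() arguments the removed GPT4ALL.ask() passed through
for token in llm.generate(
    prompt="tell me about india",
    max_tokens=800,
    temp=0.7,
    top_p=0.4,
    repeat_penalty=1.18,
    streaming=True,
):
    print(token, end="", flush=True)
print()
```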