webscout 4.2__py3-none-any.whl → 4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +2 -2
- webscout/AIutel.py +6 -8
- webscout/Local/_version.py +1 -1
- webscout/Provider/FreeGemini.py +169 -0
- webscout/Provider/Llama.py +211 -0
- webscout/Provider/__init__.py +5 -5
- webscout/__init__.py +1 -1
- webscout/async_providers.py +0 -2
- webscout/version.py +1 -1
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/METADATA +29 -1
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/RECORD +15 -14
- webscout/Provider/Llama2.py +0 -437
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/LICENSE.md +0 -0
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/WHEEL +0 -0
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/entry_points.txt +0 -0
- {webscout-4.2.dist-info → webscout-4.3.dist-info}/top_level.txt +0 -0
webscout/AIauto.py
CHANGED
@@ -1,8 +1,8 @@
 from webscout.AIbase import Provider, AsyncProvider
 from webscout.Provider.ThinkAnyAI import ThinkAnyAI
 from webscout.Provider.Xjai import Xjai
-from webscout.Provider.
-from webscout.Provider.
+from webscout.Provider.Llama import LLAMA2
+from webscout.Provider.Llama import AsyncLLAMA2
 from webscout.Provider.Leo import LEO
 from webscout.Provider.Leo import AsyncLEO
 from webscout.Provider.Koboldai import KOBOLDAI

webscout/AIutel.py
CHANGED
@@ -220,17 +220,16 @@ class Conversation:
         ), f"File '{filepath}' does not exist"
         if not os.path.isfile(filepath):
             logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w") as fh: # Try creating new file
-                # lets add intro here
+            with open(filepath, "w", encoding="utf-8") as fh: # Try creating new file with UTF-8 encoding
                 fh.write(self.intro)
         else:
             logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath) as fh:
+            with open(filepath, encoding="utf-8") as fh: # Open with UTF-8 encoding
                 file_contents = fh.readlines()
                 if file_contents:
                     self.intro = file_contents[0] # Presume first line is the intro.
                     self.chat_history = "\n".join(file_contents[1:])
-
+
     def __trim_chat_history(self, chat_history: str, intro: str) -> str:
         """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
         len_of_intro = len(intro)
@@ -243,7 +242,6 @@ class Conversation:
             # Remove head of total (n) of chat_history
             trimmed_chat_history = chat_history[truncate_at:]
             return "... " + trimmed_chat_history
-            # print(len(self.chat_history))
         else:
             return chat_history
 
@@ -281,12 +279,12 @@ class Conversation:
         new_history = self.history_format % dict(user=prompt, llm=response)
         if self.file and self.update_file:
             if os.path.exists(self.file):
-                with open(self.file, "w") as fh:
+                with open(self.file, "w", encoding="utf-8") as fh: # Specify UTF-8 encoding
                     fh.write(self.intro + "\n" + new_history)
             else:
-                with open(self.file, "a") as fh:
+                with open(self.file, "a", encoding="utf-8") as fh: # Specify UTF-8 encoding
                     fh.write(new_history)
-
+        self.chat_history += new_history
 
 
 

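The AIutel.py change pins UTF-8 for every read and write of the chat-history file instead of relying on the platform default. A minimal illustrative sketch (not part of the package; the file name is made up) of the failure mode this avoids: on Windows, `open()` without an `encoding` argument falls back to the locale codec, often cp1252, so non-ASCII chat text can raise `UnicodeEncodeError`.

```python
# Illustrative only: write and read chat history with an explicit codec,
# mirroring the encoding="utf-8" arguments added in 4.3.
history = "User: café, 你好\nLLM: Hello!"

with open("chat_history.txt", "w", encoding="utf-8") as fh:
    fh.write(history)  # would risk UnicodeEncodeError under a cp1252 default

with open("chat_history.txt", encoding="utf-8") as fh:
    print(fh.read())
```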
webscout/Local/_version.py
CHANGED

webscout/Provider/FreeGemini.py
ADDED

@@ -0,0 +1,169 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class FreeGemini(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates FreeGemini
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://api.safone.dev/bard"
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.headers = {
+            "accept": "application/json",
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {"message": conversation_prompt}
+
+        response = self.session.post(
+            self.chat_endpoint, json=payload, timeout=self.timeout
+        )
+
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        message_load = self.get_message(resp)
+        self.conversation.update_chat_history(
+            prompt, message_load
+        )
+        return resp
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]

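FreeGemini exposes the same Provider interface as the existing providers (`ask`/`chat`/`get_message`). The usage added to the README later in this diff reduces to the following sketch; it assumes network access to api.safone.dev and is shown for illustration only.

```python
from webscout import FreeGemini

ai = FreeGemini(timeout=60)  # constructor options mirror __init__ above
response = ai.chat("What is the meaning of life?")
print(response)
```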

webscout/Provider/Llama.py
ADDED

@@ -0,0 +1,211 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class LLAMA(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates LLAMA
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): LLM model name. Defaults to "llama3-70b-8192".
+        """
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = "llama3-70b-8192",
+        self.api_endpoint = "https://api.safone.dev/llama"
+        self.headers = {
+            "accept": "application/json",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session = requests.Session()
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+        ```json
+        {
+            "text" : "print('How may I help you today?')"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "message": conversation_prompt
+        }
+
+        def for_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for chunk in response.iter_lines():
+                try:
+                    resp = json.loads(chunk)
+                    message_load += resp['message']
+                    yield chunk if raw else dict(text=message_load)
+                    self.last_response.update(resp)
+                except:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.get(
+                self.api_endpoint, params=payload, stream=False, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["message"]

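LLAMA is the replacement for the removed Llama2-based provider (Llama2.py is deleted at the end of this diff). The README example added later in the diff boils down to this sketch; again it assumes network access to api.safone.dev.

```python
from webscout import LLAMA

llama = LLAMA()
r = llama.chat("What is the meaning of life?")
print(r)
```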
webscout/Provider/__init__.py
CHANGED
@@ -2,8 +2,7 @@
 
 from .ThinkAnyAI import ThinkAnyAI
 from .Xjai import Xjai
-from .
-from .Llama2 import AsyncLLAMA2
+from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
 from .Groq import GROQ
@@ -38,11 +37,11 @@ from .VTLchat import VTLchat
 from .Geminipro import GEMINIPRO
 from .Geminiflash import GEMINIFLASH
 from .OLLAMA import OLLAMA
+from .FreeGemini import FreeGemini
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
-    '
-    'AsyncLLAMA2',
+    'LLAMA',
     'Cohere',
     'REKA',
     'GROQ',
@@ -78,7 +77,8 @@ __all__ = [
     'OPENGPTv2',
     'GEMINIPRO',
     'GEMINIFLASH',
-    'OLLAMA'
+    'OLLAMA',
+    'FreeGemini'
 
 
 ]

webscout/__init__.py
CHANGED
webscout/async_providers.py
CHANGED
@@ -2,7 +2,6 @@ from webscout import AsyncPhindSearch
 from webscout import AsyncYEPCHAT
 from webscout import AsyncOPENGPT
 from webscout import AsyncOPENAI
-from webscout import AsyncLLAMA2
 from webscout import AsyncLEO
 from webscout import AsyncKOBOLDAI
 from webscout import AsyncGROQ
@@ -15,7 +14,6 @@ mapper: dict[str, object] = {
     "koboldai": AsyncKOBOLDAI,
     "blackboxai": AsyncBLACKBOXAI,
     "gpt4free": AsyncGPT4FREE,
-    "llama2": AsyncLLAMA2,
     "yepchat": AsyncYEPCHAT,
     "leo": AsyncLEO,
     "groq": AsyncGROQ,

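The `mapper` dict resolves a provider name to its async class, so dropping the `"llama2"` entry means lookups by that key now raise `KeyError`. A small sketch, assuming only the module-level `mapper` shown in the hunk above:

```python
from webscout.async_providers import mapper

provider_cls = mapper["groq"]  # still registered in 4.3
print(provider_cls)

# mapper["llama2"]  # removed in 4.3 -> KeyError
```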
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "4.2"
+__version__ = "4.3"
 __prog__ = "webscout"

{webscout-4.2.dist-info → webscout-4.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.2
+Version: 4.3
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1472,6 +1472,34 @@ response = ollama_provider.chat("What is the meaning of life?")
 print(response)
 ```
 
+### 22. GROQ
+```python
+from webscout import GROQ
+ai = GROQ(api_key="")
+response = ai.chat("What is the meaning of life?")
+print(response)
+
+```
+
+### 23. Freegemini - chat with gemini for free
+```python
+from webscout import FreeGemini
+ai = FreeGemini()
+response = ai.chat("What is the meaning of life?")
+print(response)
+```
+
+### 24. LLama 70b - chat with meta's llama 3 70b
+```python
+
+from webscout import LLAMA
+
+llama = LLAMA()
+
+r = llama.chat("What is the meaning of life?")
+print(r)
+```
+
 ### `LLM`
 ```python
 from webscout.LLM import LLM

{webscout-4.2.dist-info → webscout-4.3.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
-webscout/AIauto.py,sha256=
+webscout/AIauto.py,sha256=5ZMoS39Tyy1AZS6s_bgVnng-x9CmvHhWWNB4QMB5v9U,20003
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
-webscout/AIutel.py,sha256=
+webscout/AIutel.py,sha256=1NQAchS2e6c1SrIq0efsVtX3ANZ5XI1hjKVHGpJG7OU,34076
 webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
 webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
 webscout/YTdownloader.py,sha256=uWpUWnw9pxeEGw9KJ_3XDyQ5gd38gH1dJpr-HJo4vzU,39144
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=DX52bX0RKkXgKAWohQRyBKNdiamZmp2aQuTpsD5ohbY,2216
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
-webscout/async_providers.py,sha256=
+webscout/async_providers.py,sha256=MRj0klEhBYVQXnzZGG_15d0e-TPA0nOc2nn735H-wR4,622
 webscout/cli.py,sha256=EDxqTmcIshvhg9P0n2ZPaApj2-MEFY3uawS92zbBV_s,14705
 webscout/exceptions.py,sha256=YtIs-vXBwcjbt9TZ_wB7yI0dO7ANYIZAmEEeLmoQ2fI,487
 webscout/g4f.py,sha256=NNcnlOtIWV9R93UsBN4jBGBEJ9sJ-Np1WbgjkGVDcYc,24487
@@ -14,7 +14,7 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
-webscout/version.py,sha256=
+webscout/version.py,sha256=Pp5thQN3CvwDpubKz9MHn-UvDhuocamnBfB2VckwBGI,44
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
 webscout/webai.py,sha256=LPn9XKvc5SLxJ68slMsPUXxzkzfa4b0kzsiJyWs-yq0,88897
 webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
@@ -26,7 +26,7 @@ webscout/Extra/gguf.py,sha256=5zTNE5HxM_VQ5ONoocL8GG5fRXrgyLdEEjNzndG0oUw,7811
 webscout/Extra/weather.py,sha256=ocGwJYp5B9FwVWvIZ9wtoJTQsPFt64Vt8TitxJcdvAU,1687
 webscout/Extra/weather_ascii.py,sha256=sy6EEh2kN1CO1hKda8chD-mVCxH4p0NHyP7Uxr0-rgo,630
 webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
-webscout/Local/_version.py,sha256=
+webscout/Local/_version.py,sha256=yH-h9AKl_KbJwMWeq0PDDOVI2FQ9NutjLDqcCGuAQ6I,83
 webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
 webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
 webscout/Local/rawdog.py,sha256=ojY_O8Vb1KvR34OwWdfLgllgaAK_7HMf64ElMATvCXs,36689
@@ -40,13 +40,14 @@ webscout/Provider/ChatGPTUK.py,sha256=qmuCb_a71GNE5LelOb5AKJUBndvj7soebiNey4VdDv
 webscout/Provider/Cohere.py,sha256=IXnRosYOaMAA65nvsKmN6ZkJGSdZFYQYBidzuNaCqX8,8711
 webscout/Provider/Deepinfra.py,sha256=kVnWARJdEtIeIsZwGw3POq8B2dO87bDcJso3uOeCeOA,18750
 webscout/Provider/Deepseek.py,sha256=pnOB44ObuOfAsoi_bUGUvha3tfwd0rTJ9rnX-14QkL4,10550
+webscout/Provider/FreeGemini.py,sha256=GbTJEG09vs5IKWKy9FqHBvDNKVq-HdMexOplctpb0RI,6426
 webscout/Provider/Gemini.py,sha256=_4DHWvlWuNAmVHPwHB1RjmryjTZZCthLa6lvPEHLvkQ,8451
 webscout/Provider/Geminiflash.py,sha256=1kMPA-ypi1gmJoms606Z7j_51znpdofM2aAyo4Hl7wU,5951
 webscout/Provider/Geminipro.py,sha256=nOifT5CRmnUg28iifSbOHkNLoKucLRr5zCj607mVrhw,5948
 webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,21094
 webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
 webscout/Provider/Leo.py,sha256=wbuDR-vFjLptfRC6yDlk74tINqNvCOzpISsK92lIgGg,19987
-webscout/Provider/
+webscout/Provider/Llama.py,sha256=F_srqtdo6ws03tnEaetZOfDolXrQEnLZaIxmQaY_tJQ,8052
 webscout/Provider/OLLAMA.py,sha256=G8sz_P7OZINFI1qGnpDhNPWU789Sv2cpDnShOA5Nbmw,7075
 webscout/Provider/OpenGPT.py,sha256=ZymwLgNJSPlGZHW3msMlnRR7NxmALqJw9yuToqrRrhw,35515
 webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
@@ -59,10 +60,10 @@ webscout/Provider/VTLchat.py,sha256=_sErGr-wOi16ZAfiGOo0bPsAEMkjzzwreEsIqjIZMIU,
 webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
 webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
 webscout/Provider/Youchat.py,sha256=fhMpt94pIPE_XDbC4z9xyfgA7NbkNE2wlRFJabsjv90,8069
-webscout/Provider/__init__.py,sha256=
-webscout-4.
-webscout-4.
-webscout-4.
-webscout-4.
-webscout-4.
-webscout-4.
+webscout/Provider/__init__.py,sha256=j6lZqjLYext2a-KTnvGEvVm-D3jezHIlnanlj2H37FI,1962
+webscout-4.3.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-4.3.dist-info/METADATA,sha256=Wh2IMCZhNgKcxsOqGNPriPzrEYoQ4uWfLakOnteemsc,57597
+webscout-4.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+webscout-4.3.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-4.3.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-4.3.dist-info/RECORD,,

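For reference, the hash column in RECORD follows the standard wheel convention: the file's SHA-256 digest, urlsafe-base64 encoded with trailing `=` padding stripped, followed by the file size in bytes. A small sketch of how such an entry is produced (path purely illustrative):

```python
import base64
import hashlib

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: <path>,sha256=<urlsafe-b64 digest>,<size>."""
    with open(path, "rb") as fh:
        data = fh.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"

# record_entry("webscout/version.py") should reproduce the new line
# webscout/version.py,sha256=Pp5thQN3CvwDpubKz9MHn-UvDhuocamnBfB2VckwBGI,44
```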
webscout/Provider/Llama2.py
DELETED
@@ -1,437 +0,0 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
-import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
-import json
-import yaml
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
-from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
-
-class AsyncLLAMA2(AsyncProvider):
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 800,
-        temperature: float = 0.75,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 0.9,
-        model: str = "meta/meta-llama-3-70b-instruct",
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Instantiates LLAMA2
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.75.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
-            model (str, optional): LLM model name. Defaults to "meta/llama-2-70b-chat".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.chat_endpoint = "https://www.llama2.ai/api"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.headers = {
-            "Content-Type": "application/json",
-            "Referer": "https://www.llama2.ai/",
-            "Content-Type": "text/plain;charset=UTF-8",
-            "Origin": "https://www.llama2.ai",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session = httpx.AsyncClient(
-            headers=self.headers,
-            proxies=proxies,
-        )
-
-    async def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict|AsyncGeneraror[dict] : ai content
-        ```json
-        {
-            "text" : "How may I help you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
-            "model": self.model,
-            "systemPrompt": "You are a helpful assistant.",
-            "temperature": self.temperature,
-            "topP": self.top_p,
-            "maxTokens": self.max_tokens_to_sample,
-            "image": None,
-            "audio": None,
-        }
-
-        async def for_stream():
-            async with self.session.stream(
-                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-            ) as response:
-                if (
-                    not response.is_success
-                    or not response.headers.get("Content-Type")
-                    == "text/plain; charset=utf-8"
-                ):
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                    )
-                message_load: str = ""
-                async for value in response.aiter_lines():
-                    try:
-                        if bool(value.strip()):
-                            message_load += value + "\n"
-                            resp: dict = dict(text=message_load)
-                            yield value if raw else resp
-                            self.last_response.update(resp)
-                    except json.decoder.JSONDecodeError:
-                        pass
-            self.conversation.update_chat_history(
-                prompt, await self.get_message(self.last_response)
-            )
-
-        async def for_non_stream():
-            async for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str | AsyncGenerator:
-        """Generate response `str` asynchronously.
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str|AsyncGenerator: Response generated
-        """
-
-        async def for_stream():
-            async_ask = await self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            )
-            async for response in async_ask:
-                yield await self.get_message(response)
-
-        async def for_non_stream():
-            return await self.get_message(
-                await self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (str): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-class LLAMA2(Provider):
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 800,
-        temperature: float = 0.75,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 0.9,
-        model: str = "meta/meta-llama-3-70b-instruct",
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Instantiates LLAMA2
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.75.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.9.
-            model (str, optional): LLM model name. Defaults to "meta/llama-2-70b-chat".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.chat_endpoint = "https://www.llama2.ai/api"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.headers = {
-            "Content-Type": "application/json",
-            "Referer": "https://www.llama2.ai/",
-            "Content-Type": "text/plain;charset=UTF-8",
-            "Origin": "https://www.llama2.ai",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I help you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-        self.session.headers.update(self.headers)
-
-        payload = {
-            "prompt": f"{conversation_prompt}<s>[INST] {prompt} [/INST]",
-            "model": self.model,
-            "systemPrompt": "You are a helpful assistant.",
-            "temperature": self.temperature,
-            "topP": self.top_p,
-            "maxTokens": self.max_tokens_to_sample,
-            "image": None,
-            "audio": None,
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type")
-                == "text/plain; charset=utf-8"
-            ):
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
-                )
-
-            message_load: str = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                delimiter="\n",
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    if bool(value.strip()):
-                        message_load += value + "\n"
-                        resp: dict = dict(text=message_load)
-                        yield value if raw else resp
-                        self.last_response.update(resp)
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (str): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]

{webscout-4.2.dist-info → webscout-4.3.dist-info}/LICENSE.md
File without changes

{webscout-4.2.dist-info → webscout-4.3.dist-info}/WHEEL
File without changes

{webscout-4.2.dist-info → webscout-4.3.dist-info}/entry_points.txt
File without changes

{webscout-4.2.dist-info → webscout-4.3.dist-info}/top_level.txt
File without changes