webscout 6.2b0__py3-none-any.whl → 6.4__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registry.
This version of webscout has been flagged as a potentially problematic release.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +112 -239
- webscout/AIutel.py +488 -1130
- webscout/Agents/functioncall.py +248 -198
- webscout/Bing_search.py +250 -153
- webscout/DWEBS.py +454 -178
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +681 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +441 -226
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +442 -100
- webscout/Litlogger/__init__.py +681 -0
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +221 -207
- webscout/Provider/Perplexity.py +598 -598
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +3 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/__init__.py +146 -132
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +227 -206
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +180 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +251 -244
- webscout/Provider/typegpt.py +359 -0
- webscout/__init__.py +28 -23
- webscout/__main__.py +5 -5
- webscout/cli.py +327 -347
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +831 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/transcriber.py +479 -551
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- webscout-6.4.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/METADATA +34 -55
- webscout-6.4.dist-info/RECORD +154 -0
- webscout/Provider/TTI/AIuncensored.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/models.py +0 -23
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- webscout-6.2b0.dist-info/RECORD +0 -118
- /webscout/{g4f.py → gpt4free.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
webscout/Provider/geminiapi.py
CHANGED

@@ -1,198 +1,208 @@

(Only fragments of the 198 removed lines survive in this view; the 208 added lines of the rewritten module follow.)

"""
Install the Google AI Python SDK

$ pip install google-generativeai
"""

import os
import google.generativeai as genai

from google.generativeai.types import HarmCategory, HarmBlockThreshold
import requests
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider


class GEMINIAPI(Provider):
    """
    A class to interact with the Gemini API using the google-generativeai library.
    """

    def __init__(
        self,
        api_key,
        model_name: str = "gemini-1.5-flash-latest",
        temperature: float = 1,
        top_p: float = 0.95,
        top_k: int = 64,
        max_output_tokens: int = 8192,
        is_conversation: bool = True,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_instruction: str = "You are a helpful and informative AI assistant.",
        safety_settings: dict = None,
    ):
        """
        Initializes the Gemini API with the given parameters.

        Args:
            api_key (str, optional): Your Gemini API key. If None, it will use the environment variable "GEMINI_API_KEY".
                Defaults to None.
            model_name (str, optional): The name of the Gemini model to use.
                Defaults to "gemini-1.5-flash-exp-0827".
            temperature (float, optional): The temperature parameter for the model. Defaults to 1.
            top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
            top_k (int, optional): The top_k parameter for the model. Defaults to 64.
            max_output_tokens (int, optional): The maximum number of output tokens. Defaults to 8192.
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            system_instruction (str, optional): System instruction to guide the AI's behavior.
                Defaults to "You are a helpful and informative AI assistant.".
        """
        self.api_key = api_key
        self.model_name = model_name
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.max_output_tokens = max_output_tokens
        self.system_instruction = system_instruction
        self.safety_settings = safety_settings if safety_settings else {}
        self.session = requests.Session()  # Not directly used for Gemini API calls, but can be used for other requests
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_output_tokens
        self.timeout = timeout
        self.last_response = {}

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

        # Configure the Gemini API
        genai.configure(api_key=self.api_key)

        # Create the model with generation config
        self.generation_config = {
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "max_output_tokens": self.max_output_tokens,
            "response_mime_type": "text/plain",
        }

        self.model = genai.GenerativeModel(
            model_name=self.model_name,
            generation_config=self.generation_config,
            safety_settings=self.safety_settings,
            system_instruction=self.system_instruction,
        )

        # Start the chat session
        self.chat_session = self.model.start_chat()

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Not used for Gemini API. Defaults to False.
            raw (bool, optional): Not used for Gemini API. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Send the message to the chat session and get the response
        response = self.chat_session.send_message(conversation_prompt)
        self.last_response.update(dict(text=response.text))
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,  # Streaming not supported by the current google-generativeai library
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Not used for Gemini API. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """
        return self.get_message(
            self.ask(
                prompt,
                optimizer=optimizer,
                conversationally=conversationally,
            )
        )

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    safety_settings = {
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
    }
    ai = GEMINIAPI(api_key="", safety_settings=safety_settings)
    res = ai.chat(input(">>> "))
    for r in res:
        print(r, end="", flush=True)
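For orientation, here is a minimal usage sketch for the rewritten GEMINIAPI provider. It is not part of the package: reading the key from the GEMINI_API_KEY environment variable is an assumption borrowed from the docstring (the class itself only passes api_key through to genai.configure), and the prompt is illustrative.

import os
from webscout.Provider.geminiapi import GEMINIAPI

# Hypothetical key source; the class does not read the environment itself.
ai = GEMINIAPI(api_key=os.environ.get("GEMINI_API_KEY", ""))

# chat() returns the complete reply as a string; streaming is not
# supported by this provider, per the docstrings above.
print(ai.chat("Say hello in one short sentence."))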
webscout/Provider/llama3mitril.py
ADDED

@@ -0,0 +1,180 @@

import requests
import json
import re
from typing import Any, Dict, Optional, Generator
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions


class Llama3Mitril(Provider):
    """
    A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful, respectful and honest assistant.",
        temperature: float = 0.8,
    ):
        """Initializes the Llama3Mitril API."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.api_endpoint = "https://llama3.mithrilsecurity.io/generate_stream"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            "Content-Type": "application/json",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0"
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def _format_prompt(self, prompt: str) -> str:
        """Format the prompt for the Llama3 model"""
        return (
            f"<|begin_of_text|>"
            f"<|start_header_id|>system<|end_header_id|>{self.system_prompt}<|eot_id|>"
            f"<|start_header_id|>user<|end_header_id|>{prompt}<|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|><|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|>"
        )

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Sends a prompt to the Llama3 Mitril API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "inputs": self._format_prompt(conversation_prompt),
            "parameters": {
                "max_new_tokens": self.max_tokens,
                "temperature": self.temperature,
                "return_full_text": False
            }
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint,
                headers=self.headers,
                json=data,
                stream=True,
                timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    try:
                        chunk = json.loads(line.split('data: ')[1])
                        if token_text := chunk.get('token', {}).get('text'):
                            if '<|eot_id|>' not in token_text:
                                streaming_response += token_text
                                yield token_text if raw else {"text": token_text}
                    except (json.JSONDecodeError, IndexError) as e:
                        continue

            self.last_response.update({"text": streaming_response})
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            full_response = ""
            for chunk in for_stream():
                full_response += chunk if raw else chunk['text']
            return {"text": full_response}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generates a response from the Llama3 Mitril API."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extracts the message from the API response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print

    ai = Llama3Mitril(
        max_tokens=2048,
        temperature=0.8,
        timeout=30
    )

    for response in ai.chat("Hello", stream=True):
        print(response)
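As a companion note, the streaming loop in ask() above implies the endpoint emits server-sent-event lines of the form data: {"token": {"text": ...}}. A small standalone sketch of that parsing step follows; the payload shape is inferred from the code, and the sample line is invented for illustration.

import json

# One hypothetical SSE line as generate_stream might emit it.
line = 'data: {"token": {"text": "Hello"}}'

# Mirror the provider's parsing: strip the "data: " prefix, decode the
# JSON payload, then pull out the token text. Tokens containing
# <|eot_id|> mark end-of-turn and are skipped.
chunk = json.loads(line.split('data: ')[1])
token_text = chunk.get('token', {}).get('text')
if token_text and '<|eot_id|>' not in token_text:
    print(token_text)  # -> Hello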