webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
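
Beyond the Provider churn, the standout structural change is the Litlogger rewrite: `webscout/Litlogger/__init__.py` drops from 681 lines to 67, with the implementation split across new `core`, `handlers`, `styles`, and `utils` subpackages. A minimal sketch of the new surface, assuming the `Logger`/`LogFormat` usage visible in the llamatutor.py diff further down; constructor arguments beyond `name` and `format`, and the set of available formats, are not confirmed by this diff:

```python
# Minimal sketch, assuming the Litlogger surface shown in the llamatutor.py
# diff below; arguments beyond name/format are not confirmed by this diff.
from webscout.Litlogger import Logger, LogFormat

logger = Logger(
    name="MyProvider",              # label attached to each log record
    format=LogFormat.MODERN_EMOJI,  # the format the 7.2 providers select
)

logger.info("Provider initialized")             # lifecycle milestones
logger.debug("Request payload prepared")        # verbose diagnostics
logger.warning("Retrying after a parse error")  # recoverable issues
logger.error("Upstream returned an error")      # failures
```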
@@ -1,180 +1,180 @@

Every line of webscout/Provider/llama3mitril.py is marked as removed and re-added, but the removed and added hunks are identical as rendered (most likely a line-ending or whitespace rewrite), so the file body is shown once rather than twice:

```python
import requests
import json
import re
from typing import Any, Dict, Optional, Generator
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout import LitAgent as Lit

class Llama3Mitril(Provider):
    """
    A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful, respectful and honest assistant.",
        temperature: float = 0.8,
    ):
        """Initializes the Llama3Mitril API."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.api_endpoint = "https://llama3.mithrilsecurity.io/generate_stream"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            "Content-Type": "application/json",
            "DNT": "1",
            "User-Agent": Lit().random(),
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def _format_prompt(self, prompt: str) -> str:
        """Format the prompt for the Llama3 model"""
        return (
            f"<|begin_of_text|>"
            f"<|start_header_id|>system<|end_header_id|>{self.system_prompt}<|eot_id|>"
            f"<|start_header_id|>user<|end_header_id|>{prompt}<|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|><|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|>"
        )

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Sends a prompt to the Llama3 Mitril API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "inputs": self._format_prompt(conversation_prompt),
            "parameters": {
                "max_new_tokens": self.max_tokens,
                "temperature": self.temperature,
                "return_full_text": False
            }
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint,
                headers=self.headers,
                json=data,
                stream=True,
                timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    try:
                        chunk = json.loads(line.split('data: ')[1])
                        if token_text := chunk.get('token', {}).get('text'):
                            if '<|eot_id|>' not in token_text:
                                streaming_response += token_text
                                yield token_text if raw else {"text": token_text}
                    except (json.JSONDecodeError, IndexError) as e:
                        continue

            self.last_response.update({"text": streaming_response})
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            full_response = ""
            for chunk in for_stream():
                full_response += chunk if raw else chunk['text']
            return {"text": full_response}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generates a response from the Llama3 Mitril API."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extracts the message from the API response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print

    ai = Llama3Mitril(
        max_tokens=2048,
        temperature=0.8,
        timeout=30
    )

    for response in ai.chat("Hello", stream=True):
        print(response, end="", flush=True)
```
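
A note on the parser in `for_stream` above: it assumes the endpoint emits server-sent-events-style lines of the form `data: {...}` with the generated text under `token.text`, and filters out the `<|eot_id|>` end-of-turn marker. A self-contained sketch of that decode path; the sample payloads are illustrative, inferred from the `line.split('data: ')[1]` and `chunk.get('token', {}).get('text')` calls, and the real stream may carry additional fields:

```python
import json

# Illustrative stream lines, inferred from the parser above; the actual
# payload shape beyond token.text is an assumption.
sample_lines = [
    'data: {"token": {"text": "Hello"}}',
    'data: {"token": {"text": " world"}}',
    'data: {"token": {"text": "<|eot_id|>"}}',  # end-of-turn marker, dropped
]

for line in sample_lines:
    chunk = json.loads(line.split('data: ')[1])           # strip the "data: " prefix
    if token_text := chunk.get('token', {}).get('text'):  # pull the token text
        if '<|eot_id|>' not in token_text:
            print(token_text, end="")                     # prints "Hello world"
```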
webscout/Provider/llamatutor.py
CHANGED

Lines whose content was lost in the source rendering are shown blank below.

```diff
@@ -1,3 +1,4 @@
+
 import requests
 import json
 
@@ -7,9 +8,11 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
+from webscout.Litlogger import Logger, LogFormat
+
 class LlamaTutor(Provider):
     """
-    A class to interact with the LlamaTutor API (Together.ai).
+    A class to interact with the LlamaTutor API (Together.ai) with comprehensive logging.
     """
 
     def __init__(
@@ -24,23 +27,20 @@ class LlamaTutor(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
         """
-        Initializes the LlamaTutor API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for LlamaTutor.
-                Defaults to "You are a helpful AI assistant.".
+        Initializes the LlamaTutor API with given parameters and logging capabilities.
         """
+        self.logger = Logger(
+            name="LlamaTutor",
+            format=LogFormat.MODERN_EMOJI,
+
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Initializing LlamaTutor API")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -49,6 +49,7 @@ class LlamaTutor(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
@@ -71,7 +72,12 @@ class LlamaTutor(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         self.session.headers.update(self.headers)
+
+        if self.logger:
+            self.logger.debug("Headers configured and session updated")
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -79,12 +85,16 @@ class LlamaTutor(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
+        if self.logger:
+            self.logger.info("LlamaTutor initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -93,32 +103,23 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with LlamaTutor
-
-
-
-
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
+        """Chat with LlamaTutor with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
             "messages": [
@@ -135,19 +136,35 @@ class LlamaTutor(Provider):
 
         def for_stream():
             try:
-
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
+                response = requests.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout
+                )
                 response.raise_for_status()
 
-
+                if self.logger:
+                    self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                 full_response = ''
                 for line in response.iter_lines(decode_unicode=True):
                     if line:
-
-
-
-
-
-
+                        try:
+                            decoded_line = line.decode('utf-8')
+                            if decoded_line.startswith("data: "):
+                                json_data = json.loads(decoded_line[6:])
+                                if "text" in json_data:
+                                    full_response += json_data["text"]
+                                    yield json_data["text"] if raw else dict(text=json_data["text"])
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(f"Failed to parse response line: {e}")
+                            continue
 
                 self.last_response.update(dict(text=full_response))
                 self.conversation.update_chat_history(
@@ -155,11 +172,17 @@ class LlamaTutor(Provider):
                 )
 
             except requests.exceptions.HTTPError as http_err:
+                if self.logger:
+                    self.logger.error(f"HTTP error occurred: {http_err}")
                 raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
             except requests.exceptions.RequestException as err:
+                if self.logger:
+                    self.logger.error(f"Request error occurred: {err}")
                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
 
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -173,15 +196,9 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response
-
-
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+        """Generate response with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
@@ -202,21 +219,14 @@ class LlamaTutor(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Retrieves message from response with validation"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
     from rich import print
-
-    ai = LlamaTutor()
-    response = ai.chat("
+    # Enable logging for testing
+    ai = LlamaTutor(logging=True)
+    response = ai.chat("Write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
```
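
The llamatutor.py change is mostly additive logging: the new statements sit behind `if self.logger:` guards, so with the default `logging=False` they are skipped entirely. A usage sketch mirroring the updated `__main__` block; the import path is an assumption based on the file's location in the wheel:

```python
# Usage sketch based on the __main__ block above; the import path is
# assumed from the file's location in the wheel.
from webscout.Provider.llamatutor import LlamaTutor

ai = LlamaTutor(logging=True)  # new in 7.2; the default False skips all logger calls
for chunk in ai.chat("Write a poem about AI", stream=True):
    print(chunk, end="", flush=True)
```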