webscout 7.0-py3-none-any.whl → 7.1-py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/LLM.py +1 -1
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/Cloudflare.py +92 -78
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +26 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +65 -10
- webscout/Provider/Netwrck.py +61 -49
- webscout/Provider/PI.py +77 -122
- webscout/Provider/PizzaGPT.py +129 -82
- webscout/Provider/TextPollinationsAI.py +75 -47
- webscout/Provider/__init__.py +1 -3
- webscout/Provider/dgaf.py +68 -39
- webscout/Provider/gaurish.py +106 -66
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +61 -35
- webscout/Provider/meta.py +6 -6
- webscout/Provider/multichat.py +205 -104
- webscout/Provider/typegpt.py +26 -23
- webscout/Provider/yep.py +3 -3
- webscout/version.py +1 -1
- webscout/webscout_search.py +1141 -1140
- webscout/webscout_search_async.py +635 -635
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/METADATA +18 -26
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/RECORD +31 -32
- webscout/Provider/RUBIKSAI.py +0 -272
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
webscout/Provider/gaurish.py
CHANGED
@@ -1,7 +1,6 @@
 import requests
 import json
-import
-from typing import Any, Dict, Optional, Generator, List, Union
+from typing import Any, Dict, Generator, Union
 import uuid
 
 from webscout.AIutel import Optimizers
@@ -9,11 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-
 from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
 class GaurishCerebras(Provider):
     """
-    A class to interact with the Gaurish Cerebras API.
+    A class to interact with the Gaurish Cerebras API with comprehensive logging.
     """
 
     def __init__(
@@ -27,52 +27,40 @@ class GaurishCerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful assistant.",
+        system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
-        """Initializes the Gaurish Cerebras API client."""
-        self.
+        """Initializes the Gaurish Cerebras API client with logging capabilities."""
+        self.logger = LitLogger(
+            name="GaurishCerebras",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Initializing GaurishCerebras client")
+
+        self.chat_endpoint = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+
         self.headers = {
             "Content-Type": "application/json",
-            "Accept": "
-            "
-            "access-control-allow-headers": "*",
-            "access-control-allow-methods": "*",
-            "access-control-allow-origin": "*",
-            "cache-control": "public, max-age=0, must-revalidate",
-            "referrer-policy": "strict-origin-when-cross-origin",
-            "content-type": "text/event-stream; charset=utf-8",
-            "strict-transport-security": "max-age=3600; includeSubDomains",
-            "x-content-type-options": "nosniff",
-            "x-matched-path": "/api/cerebras/[...path]",
-            "x-ratelimit-limit-requests-day": "30000",
-            "x-ratelimit-limit-tokens-minute": "60000",
-            "x-ratelimit-remaining-requests-day": "29984",
-            "x-ratelimit-remaining-tokens-minute": "60000",
-            "x-ratelimit-reset-requests-day": "24092.23299384117",
-            "x-ratelimit-reset-tokens-minute": "32.232993841171265",
-            "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
-            "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
-            "accept": "application/json",
+            "Accept": "application/json",
+            "authorization": "Bearer 123",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "dnt": "1",
             "origin": "https://chat.gaurish.xyz",
             "priority": "u=1, i",
             "referer": "https://chat.gaurish.xyz/",
-            "sec-ch-ua": "
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
             "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "
+            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "user-agent":
-
-
-            "x-stainless-os": "Unknown",
-            "x-stainless-package-version": "4.67.3",
-            "x-stainless-retry-count": "0",
-            "x-stainless-runtime": "browser:chrome",
-            "x-stainless-runtime-version": "130.0.0",
+            "user-agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                           "AppleWebKit/537.36 (KHTML, like Gecko) "
+                           "Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0"),
         }
         self.session = requests.Session()
         self.session.headers.update(self.headers)
@@ -82,6 +70,11 @@ class GaurishCerebras(Provider):
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+
+        if self.logger:
+            self.logger.debug(f"Session configured with timeout: {timeout}")
+            self.logger.debug(f"Max tokens set to: {max_tokens}")
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -98,8 +91,10 @@ class GaurishCerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.system_prompt = system_prompt
+        self.system_prompt = system_prompt
 
+        if self.logger:
+            self.logger.info("GaurishCerebras initialization completed successfully")
 
     def ask(
         self,
@@ -109,6 +104,13 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict, Generator]:
+        """
+        Sends a prompt to the API and returns the response with logging.
+        If stream is True, returns a generator for streamed responses.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -116,57 +118,81 @@ class GaurishCerebras(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
+            if self.logger:
+                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-
+            if self.logger:
+                self.logger.error(f"Invalid optimizer requested: {optimizer}")
+            raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "model": "llama3.
+            "model": "llama3.3-70b",
+            "max_tokens": self.max_tokens_to_sample,
             "temperature": 0.75,
             "stream": stream,
         }
 
         def for_stream():
             try:
-
-
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
+                with self.session.post(self.chat_endpoint, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"API request failed. Status: {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    if self.logger:
+                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                     streaming_text = ""
                     for line in response.iter_lines(decode_unicode=True):
                         if line:
                             line = line.strip()
                             if line.startswith("data: "):
-
-                                if
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    if self.logger:
+                                        self.logger.debug("Stream completed")
                                     break
                                 try:
-
-                                    if
-
-
-
-
-
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            yield dict(text=content) if raw else dict(text=content)
+                                except json.JSONDecodeError as e:
+                                    if self.logger:
+                                        self.logger.error(f"JSON parsing error: {str(e)}")
                                     pass
+
                     self.conversation.update_chat_history(prompt, streaming_text)
-                    self.
+                    if self.logger:
+                        self.logger.debug("Response processing completed")
 
-            except requests.
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
-
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
-
-
     def chat(
         self,
         prompt: str,
@@ -174,33 +200,47 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator]:
+        """
+        A convenience method to return just the text message from the response with logging.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield response if isinstance(response, str) else self.get_message(response)
 
         def for_non_stream():
-
-
-                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-            )
+            resp = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
             )
+            return resp if isinstance(resp, str) else self.get_message(resp)
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-
-
+        """
+        Retrieve the message text from the API response with logging.
+        """
+        if not isinstance(response, dict):
+            if self.logger:
+                self.logger.warning("Invalid response format received")
+            return ""
 
+        if "text" in response and response["text"]:
+            return response["text"]
 
+        if self.logger:
+            self.logger.warning("No valid message content found in response")
+        return ""
 
 if __name__ == "__main__":
     from rich import print
-    bot = GaurishCerebras()
+    bot = GaurishCerebras(logging=True)
     try:
-        response = bot.chat("
+        response = bot.chat("what is meaning of life", stream=True)
         for chunk in response:
             print(chunk, end="", flush=True)
     except Exception as e:
webscout/Provider/llamatutor.py
CHANGED
@@ -1,3 +1,4 @@
+
 import requests
 import json
 
@@ -7,9 +8,11 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
 class LlamaTutor(Provider):
     """
-    A class to interact with the LlamaTutor API (Together.ai).
+    A class to interact with the LlamaTutor API (Together.ai) with comprehensive logging.
     """
 
     def __init__(
@@ -24,23 +27,20 @@ class LlamaTutor(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
         """
-        Initializes the LlamaTutor API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for LlamaTutor.
-                Defaults to "You are a helpful AI assistant.".
+        Initializes the LlamaTutor API with given parameters and logging capabilities.
         """
+        self.logger = LitLogger(
+            name="LlamaTutor",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Initializing LlamaTutor API")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -49,6 +49,7 @@ class LlamaTutor(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
@@ -71,7 +72,12 @@ class LlamaTutor(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         self.session.headers.update(self.headers)
+
+        if self.logger:
+            self.logger.debug("Headers configured and session updated")
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -79,12 +85,16 @@ class LlamaTutor(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
+        if self.logger:
+            self.logger.info("LlamaTutor initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -93,32 +103,23 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with LlamaTutor
-
-
-
-
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
+        """Chat with LlamaTutor with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-
-                    f"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
             "messages": [
@@ -135,19 +136,35 @@ class LlamaTutor(Provider):
 
         def for_stream():
             try:
-
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
+                response = requests.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout
+                )
                 response.raise_for_status()
 
-
+                if self.logger:
+                    self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                 full_response = ''
                 for line in response.iter_lines(decode_unicode=True):
                     if line:
-
-
-
-
-
-
+                        try:
+                            decoded_line = line.decode('utf-8')
+                            if decoded_line.startswith("data: "):
+                                json_data = json.loads(decoded_line[6:])
+                                if "text" in json_data:
+                                    full_response += json_data["text"]
+                                    yield json_data["text"] if raw else dict(text=json_data["text"])
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(f"Failed to parse response line: {e}")
+                            continue
 
                 self.last_response.update(dict(text=full_response))
                 self.conversation.update_chat_history(
@@ -155,11 +172,17 @@ class LlamaTutor(Provider):
                 )
 
             except requests.exceptions.HTTPError as http_err:
+                if self.logger:
+                    self.logger.error(f"HTTP error occurred: {http_err}")
                 raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
             except requests.exceptions.RequestException as err:
+                if self.logger:
+                    self.logger.error(f"Request error occurred: {err}")
                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
 
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -173,15 +196,9 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response
-
-
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+        """Generate response with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
@@ -202,21 +219,14 @@ class LlamaTutor(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Retrieves message from response with validation"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
     from rich import print
-
-    ai = LlamaTutor()
-    response = ai.chat("
+    # Enable logging for testing
+    ai = LlamaTutor(logging=True)
+    response = ai.chat("Write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)