webscout-7.4-py3-none-any.whl → webscout-7.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Provider/C4ai.py +414 -0
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +3 -32
- webscout/Provider/Deepinfra.py +30 -21
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/HeckAI.py +20 -3
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +6 -53
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
- webscout/Provider/Venice.py +200 -200
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +13 -2
- webscout/Provider/akashgpt.py +8 -5
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +181 -0
- webscout/Provider/granite.py +17 -53
- webscout/Provider/llamatutor.py +6 -46
- webscout/Provider/llmchat.py +7 -46
- webscout/Provider/multichat.py +29 -91
- webscout/exceptions.py +19 -9
- webscout/update_checker.py +55 -93
- webscout/version.py +1 -1
- webscout-7.5.dist-info/LICENSE.md +146 -0
- {webscout-7.4.dist-info → webscout-7.5.dist-info}/METADATA +5 -126
- {webscout-7.4.dist-info → webscout-7.5.dist-info}/RECORD +32 -33
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/dgaf.py +0 -214
- webscout-7.4.dist-info/LICENSE.md +0 -211
- {webscout-7.4.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
- {webscout-7.4.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
- {webscout-7.4.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/granite.py
CHANGED
@@ -5,15 +5,15 @@ from typing import Any, Dict, Generator
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
+
 class IBMGranite(Provider):
     """
     A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
-
+    using Lit agent for the user agent.
     """

-    AVAILABLE_MODELS = ["granite-3-8b-instruct"]
+    AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]

     def __init__(
         self,
@@ -27,24 +27,14 @@ class IBMGranite(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "granite-3-8b-instruct",
+        model: str = "granite-3-2-8b-instruct",
         system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False,
+        thinking: bool = False,
     ):
-        """Initializes the
+        """Initializes the IBMGranite API client using Lit agent for the user agent."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        # Setup logging if enabled
-        self.logger = Logger(
-            name="IBMGranite",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing IBMGranite with model: {model}")
-
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -54,6 +44,7 @@ class IBMGranite(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+        self.thinking = thinking

         # Use Lit agent to generate a random User-Agent
         self.headers = {
@@ -101,20 +92,13 @@ class IBMGranite(Provider):
         Returns:
            Union[Dict, Generator[Dict, None, None]]: Response generated
         """
-        if self.logger:
-            self.logger.debug(f"Ask method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         payload = {
@@ -123,20 +107,17 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream
+            "stream": stream,
+            "thinking": self.thinking,
         }

         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug(f"Sending POST request to {self.api_endpoint} with payload: {payload}")
                 response = self.session.post(
                     self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
                 )
                 if not response.ok:
                     msg = f"Request failed with status code {response.status_code}: {response.text}"
-                    if self.logger:
-                        self.logger.error(msg)
                     raise exceptions.FailedToGenerateResponseError(msg)

                 streaming_text = ""
@@ -149,28 +130,17 @@ class IBMGranite(Provider):
                             streaming_text += content
                             yield content if raw else dict(text=content)
                         else:
-
-
-                    except json.JSONDecodeError as e:
-                        if self.logger:
-                            self.logger.error(f"JSON decode error: {e}")
+                            # Skip unrecognized lines
+                            pass
+                    except json.JSONDecodeError:
                         continue
                 self.last_response.update(dict(text=streaming_text))
                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-                if self.logger:
-                    self.logger.info("Stream processing completed.")
-
             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"Request exception: {e}")
                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
             except json.JSONDecodeError as e:
-                if self.logger:
-                    self.logger.error(f"Invalid JSON received: {e}")
                 raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
             except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Unexpected error: {e}")
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")

         def for_non_stream():
@@ -189,20 +159,14 @@ class IBMGranite(Provider):
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
         """Generate response as a string using chat method"""
-        if self.logger:
-            self.logger.debug(f"Chat method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)

         def for_non_stream():
-            result = self.get_message(
+            return self.get_message(
                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
             )
-            if self.logger:
-                self.logger.info("Chat method completed.")
-            return result

         return for_stream() if stream else for_non_stream()

@@ -213,11 +177,11 @@ class IBMGranite(Provider):

 if __name__ == "__main__":
     from rich import print
-    # Example usage: Initialize
+    # Example usage: Initialize without logging.
     ai = IBMGranite(
-        api_key="",
-
+        api_key="", # press f12 to see the API key
+        thinking=True,
    )
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/llamatutor.py
CHANGED
@@ -1,4 +1,3 @@
-
 import requests
 import json

@@ -8,11 +7,10 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
-from webscout.Litlogger import Logger, LogFormat

 class LlamaTutor(Provider):
     """
-    A class to interact with the LlamaTutor API (Together.ai) 
+    A class to interact with the LlamaTutor API (Together.ai)
     """

     def __init__(
@@ -26,20 +24,11 @@ class LlamaTutor(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful AI assistant."
     ):
         """
-        Initializes the LlamaTutor API with given parameters
+        Initializes the LlamaTutor API with given parameters.
         """
-        self.logger = Logger(
-            name="LlamaTutor",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info("Initializing LlamaTutor API")

         self.session = requests.Session()
         self.is_conversation = is_conversation
@@ -74,9 +63,6 @@ class LlamaTutor(Provider):
         )

         self.session.headers.update(self.headers)
-
-        if self.logger:
-            self.logger.debug("Headers configured and session updated")

         Conversation.intro = (
             AwesomePrompts().get_act(
@@ -92,9 +78,6 @@ class LlamaTutor(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies

-        if self.logger:
-            self.logger.info("LlamaTutor initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -103,10 +86,7 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with LlamaTutor"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+        """Chat with LlamaTutor"""

         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -114,11 +94,7 @@ class LlamaTutor(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         payload = {
@@ -136,8 +112,6 @@ class LlamaTutor(Provider):

         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")

                 response = requests.post(
                     self.api_endpoint,
@@ -148,9 +122,6 @@ class LlamaTutor(Provider):
                 )
                 response.raise_for_status()

-                if self.logger:
-                    self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
                 full_response = ''
                 for line in response.iter_lines(decode_unicode=True):
                     if line:
@@ -162,8 +133,6 @@ class LlamaTutor(Provider):
                             full_response += json_data["text"]
                             yield json_data["text"] if raw else dict(text=json_data["text"])
                         except json.JSONDecodeError as e:
-                            if self.logger:
-                                self.logger.warning(f"Failed to parse response line: {e}")
                             continue

                 self.last_response.update(dict(text=full_response))
@@ -172,17 +141,11 @@ class LlamaTutor(Provider):
                 )

             except requests.exceptions.HTTPError as http_err:
-                if self.logger:
-                    self.logger.error(f"HTTP error occurred: {http_err}")
                 raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
             except requests.exceptions.RequestException as err:
-                if self.logger:
-                    self.logger.error(f"Request error occurred: {err}")
                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")

         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -196,9 +159,7 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+        """Generate response"""

         def for_stream():
             for response in self.ask(
@@ -225,8 +186,7 @@ class LlamaTutor(Provider):

 if __name__ == "__main__":
     from rich import print
-
-    ai = LlamaTutor(logging=True)
+    ai = LlamaTutor()
     response = ai.chat("Write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/llmchat.py
CHANGED
@@ -1,4 +1,3 @@
-
 import requests
 import json
 from typing import Any, Dict, Optional, Generator, List

@@ -8,12 +7,11 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit

 class LLMChat(Provider):
     """
-    A class to interact with the LLMChat API 
+    A class to interact with the LLMChat API
     """

     AVAILABLE_MODELS = [
@@ -37,23 +35,13 @@ class LLMChat(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "@cf/meta/llama-3.1-70b-instruct",
-        system_prompt: str = "You are a helpful assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful assistant."
     ):
         """
-        Initializes the LLMChat API with given parameters
+        Initializes the LLMChat API with given parameters.
         """
-        self.logger = Logger(
-            name="LLMChat",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing LLMChat with model: {model}")

         if model not in self.AVAILABLE_MODELS:
-            if self.logger:
-                self.logger.error(f"Invalid model selected: {model}")
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.session = requests.Session()
@@ -93,9 +81,6 @@ class LLMChat(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies

-        if self.logger:
-            self.logger.info("LLMChat initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -105,9 +90,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Chat with LLMChat with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")

         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -115,11 +97,7 @@ class LLMChat(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
@@ -136,14 +114,9 @@ class LLMChat(Provider):

         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")

                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
-
-                    if self.logger:
-                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")

                     full_response = ""
                     for line in response.iter_lines():
@@ -158,9 +131,7 @@ class LLMChat(Provider):
                                     yield response_text if raw else dict(text=response_text)
                                 except json.JSONDecodeError:
                                     if line.strip() != 'data: [DONE]':
-                                        if self.logger:
-                                            self.logger.warning(f"Failed to parse line: {line}")
-                                    continue
+                                        continue

                     self.last_response.update(dict(text=full_response))
                     self.conversation.update_chat_history(
@@ -168,21 +139,14 @@ class LLMChat(Provider):
                     )

             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-
+
             full_response = ""
             for line in for_stream():
                 full_response += line['text'] if not raw else line
-
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
+
             return dict(text=full_response)

         return for_stream() if stream else for_non_stream()
@@ -195,8 +159,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
         """Generate response with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")

         def for_stream():
             for response in self.ask(
@@ -223,8 +185,7 @@ class LLMChat(Provider):

 if __name__ == "__main__":
     from rich import print
-
-    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
     response = ai.chat("What's the meaning of life?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
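Note: with the logger mirroring removed, failures in LLMChat now surface only as raised exceptions — ValueError for an unknown model, exceptions.FailedToGenerateResponseError for optimizer and request errors. A defensive-use sketch under the 7.5 signature shown above, with the import path assumed from the file layout:

# Sketch only: error handling for LLMChat after the 7.5 logging removal.
from webscout import exceptions
from webscout.Provider.llmchat import LLMChat

try:
    ai = LLMChat(model="@cf/meta/llama-3.1-70b-instruct")
    for chunk in ai.chat("What's the meaning of life?", stream=True):
        print(chunk, end="", flush=True)
except ValueError as e:
    # Unknown model names are rejected against AVAILABLE_MODELS.
    print(f"bad model: {e}")
except exceptions.FailedToGenerateResponseError as e:
    # 7.4 also mirrored these failures to a per-instance Litlogger when
    # logging=True; 7.5 only raises.
    print(f"request failed: {e}")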
|