webscout-7.3-py3-none-any.whl → webscout-7.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release has been flagged as potentially problematic; see the registry page for details.
- webscout/Provider/AISEARCH/__init__.py +4 -3
- webscout/Provider/AISEARCH/genspark_search.py +208 -0
- webscout/Provider/AllenAI.py +282 -0
- webscout/Provider/C4ai.py +414 -0
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +3 -32
- webscout/Provider/Deepinfra.py +52 -44
- webscout/Provider/ElectronHub.py +634 -0
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/Glider.py +7 -41
- webscout/Provider/HeckAI.py +217 -0
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Jadve.py +49 -63
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +6 -53
- webscout/Provider/PI.py +106 -93
- webscout/Provider/Perplexitylabs.py +395 -0
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/QwenLM.py +7 -61
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TextPollinationsAI.py +3 -2
- webscout/Provider/TwoAI.py +200 -0
- webscout/Provider/Venice.py +200 -0
- webscout/Provider/WiseCat.py +1 -18
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +25 -2
- webscout/Provider/akashgpt.py +315 -0
- webscout/Provider/chatglm.py +5 -5
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +181 -0
- webscout/Provider/freeaichat.py +251 -221
- webscout/Provider/granite.py +17 -53
- webscout/Provider/koala.py +9 -1
- webscout/Provider/llamatutor.py +6 -46
- webscout/Provider/llmchat.py +7 -46
- webscout/Provider/multichat.py +29 -91
- webscout/Provider/yep.py +4 -24
- webscout/exceptions.py +19 -9
- webscout/update_checker.py +55 -93
- webscout/version.py +1 -1
- webscout-7.5.dist-info/LICENSE.md +146 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/dgaf.py +0 -214
- webscout-7.3.dist-info/LICENSE.md +0 -211
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
- {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/freeaichat.py
CHANGED

@@ -1,221 +1,251 @@
-[old version, 221 lines, removed in full; only fragments are recoverable in this view: the same opening imports, the FreeAIChat class header, and the start of AVAILABLE_MODELS]
+import requests
+import json
+import time
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import Logger, LogFormat
+
+class FreeAIChat(Provider):
+    """
+    A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
+    """
+
+    AVAILABLE_MODELS = [
+        "mistral-nemo",
+        "mistral-large",
+        "gemini-2.0-flash",
+        "gemini-1.5-pro",
+        "gemini-1.5-flash",
+        "gemini-2.0-pro-exp-02-05",
+        "deepseek-r1",
+        "deepseek-v3",
+        "Deepseek r1 14B",
+        "Deepseek r1 32B",
+        "o3-mini-high",
+        "o3-mini-medium",
+        "o3-mini-low",
+        "o3-mini",
+        "GPT-4o-mini",
+        "o1",
+        "o1-mini",
+        "GPT-4o",
+        "Qwen coder",
+        "Qwen 2.5 72B",
+        "Llama 3.1 405B",
+        "llama3.1-70b-fast",
+        "Llama 3.3 70B",
+        "claude 3.5 haiku",
+        "claude 3.5 sonnet",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "GPT-4o",
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """Initializes the FreeAIChat API client with logging support."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://freeaichatplayground.com/api/v1/chat/completions"
+        self.headers = {
+            'User-Agent': LitAgent().random(),
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://freeaichatplayground.com',
+            'Referer': 'https://freeaichatplayground.com/',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        self.logger = Logger(
+            name="FreeAIChat",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {
+                "role": "system",
+                "content": self.system_prompt
+            },
+            {
+                "role": "user",
+                "content": conversation_prompt
+            }
+        ]
+
+        payload = {
+            "model": self.model,
+            "messages": messages
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to FreeAIChat API...")
+            try:
+                with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = dict(text=content)
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    pass
+
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
+
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_text = ""
+            for chunk in for_stream():
+                full_text += chunk["text"]
+            return {"text": full_text}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+    @staticmethod
+    def fix_encoding(text):
+        if isinstance(text, dict) and "text" in text:
+            try:
+                text["text"] = text["text"].encode("latin1").decode("utf-8")
+                return text
+            except (UnicodeError, AttributeError) as e:
+                return text
+        elif isinstance(text, str):
+            try:
+                return text.encode("latin1").decode("utf-8")
+            except (UnicodeError, AttributeError) as e:
+                return text
+        return text
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = FreeAIChat(model="GPT-4o", logging=True)
+    # response = ai.chat(input(">>>"), stream=True)
+    # full_text = ""
+
+    # for chunk in response:
+    #     corrected_chunk = ai.fix_encoding(chunk)
+    #     full_text += corrected_chunk
+
+    response = ai.chat(input(">>>"), stream=False)
+    response = ai.fix_encoding(response)
+    print(response)
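The new fix_encoding helper added in this file relies on a standard mojibake round-trip: text that was UTF-8 bytes but got decoded as Latin-1 can be repaired by encoding it back to Latin-1 and decoding it as UTF-8. A minimal sketch of the idea (the sample string is illustrative, not from the package):

    # Mojibake repair round-trip, the same trick FreeAIChat.fix_encoding applies.
    mojibake = "café".encode("utf-8").decode("latin1")    # UTF-8 bytes misread as Latin-1 -> 'cafÃ©'
    repaired = mojibake.encode("latin1").decode("utf-8")  # round-trip restores the original
    assert repaired == "café"

If the text was never mis-decoded, the round-trip raises UnicodeError, which is why fix_encoding swallows that exception and returns the input unchanged.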
webscout/Provider/granite.py
CHANGED

@@ -5,15 +5,15 @@ from typing import Any, Dict, Generator
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
+
 class IBMGranite(Provider):
     """
     A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
-    [old docstring line not recoverable in this view]
+    using Lit agent for the user agent.
     """

-    AVAILABLE_MODELS = ["granite-3-8b-instruct"]
+    AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]

     def __init__(
         self,

@@ -27,24 +27,14 @@ class IBMGranite(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "granite-3-8b-instruct",
+        model: str = "granite-3-2-8b-instruct",
         system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False,
+        thinking: bool = False,
     ):
-        """Initializes the
+        """Initializes the IBMGranite API client using Lit agent for the user agent."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        # Setup logging if enabled
-        self.logger = Logger(
-            name="IBMGranite",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing IBMGranite with model: {model}")
-
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens

@@ -54,6 +44,7 @@ class IBMGranite(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+        self.thinking = thinking

         # Use Lit agent to generate a random User-Agent
         self.headers = {

@@ -101,20 +92,13 @@ class IBMGranite(Provider):
         Returns:
             Union[Dict, Generator[Dict, None, None]]: Response generated
         """
-        if self.logger:
-            self.logger.debug(f"Ask method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         payload = {

@@ -123,20 +107,17 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream
+            "stream": stream,
+            "thinking": self.thinking,
         }

         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug(f"Sending POST request to {self.api_endpoint} with payload: {payload}")
                 response = self.session.post(
                     self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
                 )
                 if not response.ok:
                     msg = f"Request failed with status code {response.status_code}: {response.text}"
-                    if self.logger:
-                        self.logger.error(msg)
                     raise exceptions.FailedToGenerateResponseError(msg)

                 streaming_text = ""

@@ -149,28 +130,17 @@
                             streaming_text += content
                             yield content if raw else dict(text=content)
                         else:
-                            [two removed lines not recoverable in this view]
-                    except json.JSONDecodeError as e:
-                        if self.logger:
-                            self.logger.error(f"JSON decode error: {e}")
+                            # Skip unrecognized lines
+                            pass
+                    except json.JSONDecodeError:
                         continue
                 self.last_response.update(dict(text=streaming_text))
                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-                if self.logger:
-                    self.logger.info("Stream processing completed.")
-
             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"Request exception: {e}")
                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
             except json.JSONDecodeError as e:
-                if self.logger:
-                    self.logger.error(f"Invalid JSON received: {e}")
                 raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
             except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Unexpected error: {e}")
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")

         def for_non_stream():

@@ -189,20 +159,14 @@
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
         """Generate response as a string using chat method"""
-        if self.logger:
-            self.logger.debug(f"Chat method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)

         def for_non_stream():
-            result = self.get_message(
+            return self.get_message(
                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
             )
-            if self.logger:
-                self.logger.info("Chat method completed.")
-            return result

         return for_stream() if stream else for_non_stream()

@@ -213,11 +177,11 @@

 if __name__ == "__main__":
     from rich import print
-    # Example usage: Initialize
+    # Example usage: Initialize without logging.
     ai = IBMGranite(
-        api_key="",
-        logging=True,
+        api_key="",  # press f12 to see the API key
+        thinking=True,
     )
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
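For reference, the updated provider now sends the new thinking field alongside stream in its request body. A hypothetical illustration with placeholder values — only the field names and the default model come from the diff above:

    import json

    # Sketch of the request body built by IBMGranite.ask() in 7.5; values are
    # example placeholders, field names are taken from the diff.
    payload = {
        "model": "granite-3-2-8b-instruct",
        "messages": [
            {"role": "system", "content": "You are a helpful AI assistant."},
            {"role": "user", "content": "write a poem about AI"},
        ],
        "stream": True,
        "thinking": True,  # new in 7.5: forwarded from IBMGranite(thinking=...)
    }
    print(json.dumps(payload, indent=2))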