webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
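
Both provider rewrites shown in full below (elmo.py and granite.py) apply the same migration: the plain requests session is swapped for curl_cffi's Session, requests are sent with a browser impersonation profile, and transport failures surface as CurlError instead of requests exceptions. A minimal sketch of the shared pattern follows; the endpoint and payload here are hypothetical, while every call used (Session, post with impersonate, raise_for_status, iter_lines) appears verbatim in the diffs below.

    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"accept": "*/*", "content-type": "application/json"})

    try:
        response = session.post(
            "https://example.invalid/api/chat",  # hypothetical endpoint
            json={"prompt": "hi"},               # hypothetical payload
            stream=True,
            timeout=30,
            # impersonate aligns the TLS/HTTP fingerprint with a real browser,
            # which is the reason for moving off plain requests
            impersonate="chrome110",
        )
        response.raise_for_status()
        # curl_cffi yields raw bytes; decode each line manually
        for line_bytes in response.iter_lines():
            if line_bytes:
                print(line_bytes.decode("utf-8"))
    except CurlError as e:
        print(f"transport-level failure: {e}")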
webscout/Provider/elmo.py
CHANGED
@@ -1,10 +1,15 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+from typing import Union, Any, Dict, Generator
+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout.litagent import LitAgent
 
+
 class Elmo(Provider):
     """
     A class to interact with the Elmo.chat API.
@@ -13,7 +18,7 @@ class Elmo(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -22,7 +27,6 @@ class Elmo(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful AI assistant. Provide clear, concise, and well-structured information. Organize your responses into paragraphs for better readability.",
-
     ) -> None:
         """Instantiates Elmo
 
@@ -37,9 +41,9 @@ class Elmo(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt for Elmo. Defaults to the provided string.
-            web_search (bool, optional): Enables web search mode when True. Defaults to False.
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://www.elmo.chat/api/v1/prompt"
@@ -49,20 +53,14 @@ class Elmo(Provider):
         self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "763",
             "content-type": "text/plain;charset=UTF-8",
             "dnt": "1",
             "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
             "priority": "u=1, i",
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "cross-site",
-            "user-agent": LitAgent().random(),
         }
 
         self.__available_optimizers = (
@@ -70,7 +68,10 @@ class Elmo(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -82,16 +83,15 @@ class Elmo(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
 
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) ->
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
         """Chat with AI
 
         Args:
@@ -106,7 +106,7 @@ class Elmo(Provider):
            {
               "text" : "How may I assist you today?"
            }
-        ```
+        ```json
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -119,7 +119,6 @@ class Elmo(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-
         payload = {
             "metadata": {
                 "system": {"language": "en-US"},
@@ -145,36 +144,75 @@ class Elmo(Provider):
         }
 
         def for_stream():
-            [10 removed lines; content not captured in the extracted diff]
+            full_response = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                # Note: The API expects 'text/plain' but we send JSON.
+                # If this fails, try sending json.dumps(payload) as data with 'Content-Type': 'application/json'
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session, but content-type might need override if sending JSON
+                    json=payload, # Sending as JSON
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-            [14 removed lines; content not captured in the extracted diff]
+                response.raise_for_status() # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8')
+                            if line.startswith('0:'):
+                                # Extract content after '0:"' and before the closing '"'
+                                match = line.split(':"', 1)
+                                if len(match) > 1:
+                                    chunk = match[1]
+                                    if chunk.endswith('"'):
+                                        chunk = chunk[:-1] # Remove trailing quote
+
+                                    # Handle potential escape sequences like \n
+                                    formatted_output = chunk.encode().decode('unicode_escape')
+
+                                    if formatted_output: # Ensure content is not None or empty
+                                        full_response += formatted_output
+                                        resp = dict(text=formatted_output)
+                                        # Yield dict or raw string chunk
+                                        yield resp if not raw else formatted_output
+                        except (UnicodeDecodeError, IndexError):
+                            continue # Ignore lines that cannot be decoded or parsed
+
+                # Update history after stream finishes
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-            [3 removed lines; content not captured in the extracted diff]
+            # Aggregate the stream using the updated for_stream logic
+            collected_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return collected_text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
 
@@ -184,7 +222,7 @@ class Elmo(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -195,23 +233,27 @@ class Elmo(Provider):
             str: Response generated
         """
 
-        def
-        [4 removed lines; content not captured in the extracted diff]
+        def for_stream_chat(): # Renamed inner function
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def
-        [7 removed lines; content not captured in the extracted diff]
+        def for_non_stream_chat(): # Renamed inner function
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return
+        return for_stream_chat() if stream else for_non_stream_chat() # Use renamed functions
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -227,6 +269,7 @@ class Elmo(Provider):
 
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     ai = Elmo()
     response = ai.chat("write a poem about AI", stream=True)
webscout/Provider/granite.py
CHANGED
@@ -1,4 +1,5 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator
 
@@ -19,7 +20,7 @@ class IBMGranite(Provider):
         self,
         api_key: str,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -35,7 +36,8 @@ class IBMGranite(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        [1 removed line; content not captured in the extracted diff]
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://d18n68ssusgr7r.cloudfront.net/v1/chat/completions"
@@ -46,18 +48,19 @@ class IBMGranite(Provider):
         self.system_prompt = system_prompt
         self.thinking = thinking
 
-        # Use Lit agent
+        # Use Lit agent (keep if needed for other headers or logic)
         self.headers = {
-            "authority": "d18n68ssusgr7r.cloudfront.net",
-            "accept": "application/json,application/jsonl",
+            "authority": "d18n68ssusgr7r.cloudfront.net", # Keep authority
+            "accept": "application/json,application/jsonl", # Keep accept
             "content-type": "application/json",
-            "origin": "https://www.ibm.com",
-            "referer": "https://www.ibm.com/",
-            "user-agent": Lit().random(),
+            "origin": "https://www.ibm.com", # Keep origin
+            "referer": "https://www.ibm.com/", # Keep referer
         }
         self.headers["Authorization"] = f"Bearer {api_key}"
+
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -77,7 +80,7 @@ class IBMGranite(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -107,48 +110,81 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream
-            "thinking": self.thinking,
+            "stream": True # API seems to require stream=True based on response format
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-                [1 removed line; content not captured in the extracted diff]
-                    msg = f"Request failed with status code {response.status_code}: {response.text}"
-                    raise exceptions.FailedToGenerateResponseError(msg)
+                response.raise_for_status() # Check for HTTP errors
 
-                [1 removed line; content not captured in the extracted diff]
-                for
-                    if
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
                         try:
+                            line = line_bytes.decode('utf-8')
                             data = json.loads(line)
-                            [1 removed line; content not captured in the extracted diff]
+                            # Check the specific format [3, "text_chunk"]
+                            if isinstance(data, list) and len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
                                 content = data[1]
-                                [2 removed lines; content not captured in the extracted diff]
+                                if content: # Ensure content is not None or empty
+                                    streaming_text += content
+                                    resp = dict(text=content)
+                                    # Yield dict or raw string chunk
+                                    yield resp if not raw else content
                             else:
-                                # Skip unrecognized lines
+                                # Skip unrecognized lines/formats
                                 pass
-                        except json.JSONDecodeError:
-                            continue
-                [8 removed lines; content not captured in the extracted diff]
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes
+                self.last_response = dict(text=streaming_text)
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except json.JSONDecodeError as e: # Keep specific JSON error handling
+                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                # Use specific exception type if available, otherwise generic
+                ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
+                raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
 
         def for_non_stream():
-            #
-            [3 removed lines; content not captured in the extracted diff]
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
 
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -159,16 +195,24 @@ class IBMGranite(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string using chat method"""
-        def
-        [6 removed lines; content not captured in the extracted diff]
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
            )
+            return self.get_message(response_data) # get_message expects dict
 
-        return
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
@@ -176,6 +220,7 @@ class IBMGranite(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     # Example usage: Initialize without logging.
     ai = IBMGranite(