webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/__init__.py
@@ -68,7 +68,6 @@ from .ExaAI import ExaAI
  from .OpenGPT import OpenGPT
  from .scira_chat import *
  from .StandardInput import *
- from .Writecream import Writecream
  from .toolbaz import Toolbaz
  from .scnet import SCNet
  from .WritingMate import WritingMate
@@ -83,13 +82,20 @@ from .Flowith import Flowith
  from .samurai import samurai
  from .lmarena import lmarena
  from .oivscode import oivscode
-
- # Import STT providers
- from .STT import *
+ from .XenAI import XenAI
+ from .deepseek_assistant import DeepSeekAssistant
+ from .GeminiProxy import GeminiProxy
+ from .TogetherAI import TogetherAI
+ from .MiniMax import MiniMax
  __all__ = [
      'SCNet',
+     'MiniMax',
+     'GeminiProxy',
+     'TogetherAI',
      'oivscode',
+     'DeepSeekAssistant',
      'lmarena',
+     'XenAI',
      'NEMOTRON',
      'Flowith',
      'samurai',
@@ -168,7 +174,6 @@ __all__ = [
      'AskSteve',
      'Aitopia',
      'SearchChatAI',
-     'Writecream',
      'Toolbaz',
      'MCPCore',
      'TypliAI',
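
Taken together, the three __init__.py hunks above drop the Writecream and STT exports and add five new providers to the public surface. A minimal consumption sketch (illustrative only; the import names come straight from the new __all__ entries, and the chat() call mirrors the Provider interface in deepseek_assistant.py below):

    # Hypothetical usage of the providers newly exported in 8.3.3.
    from webscout.Provider import (
        XenAI, DeepSeekAssistant, GeminiProxy, TogetherAI, MiniMax
    )

    ai = DeepSeekAssistant()   # defaults to the "V3 model"
    print(ai.chat("Hello"))    # non-streaming; returns a str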
webscout/Provider/deepseek_assistant.py (new file)
@@ -0,0 +1,378 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ import re
+ from typing import Any, Dict, Optional, Generator, Union, List
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class DeepSeekAssistant(Provider):
+     """
+     A class to interact with the DeepSeek Assistant API.
+
+     This provider interfaces with the deepseek-assistant.com API to provide
+     AI chat completions using the V3 model.
+
+     Attributes:
+         AVAILABLE_MODELS (list): List of available models for the provider.
+
+     Examples:
+         >>> from webscout.Provider.deepseek_assistant import DeepSeekAssistant
+         >>> ai = DeepSeekAssistant()
+         >>> response = ai.chat("What's the weather today?")
+         >>> print(response)
+         'I can help you with weather information...'
+     """
+
+     AVAILABLE_MODELS = ["V3 model", "R1 model"]
+
+     @staticmethod
+     def _deepseek_assistant_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from DeepSeek Assistant stream JSON objects."""
+         if isinstance(chunk, dict):
+             return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+         return None
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "V3 model",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """
+         Initializes the DeepSeek Assistant API client.
+
+         Args:
+             is_conversation (bool): Whether the provider is in conversation mode.
+             max_tokens (int): Maximum number of tokens to sample.
+             timeout (int): Timeout for API requests.
+             intro (str): Introduction message for the conversation.
+             filepath (str): Filepath for storing conversation history.
+             update_file (bool): Whether to update the conversation history file.
+             proxies (dict): Proxies for the API requests.
+             history_offset (int): Offset for conversation history.
+             act (str): Act for the conversation.
+             model (str): The model to use for completions.
+             system_prompt (str): The system prompt to define the assistant's role.
+             browser (str): Browser type for fingerprinting.
+
+         Examples:
+             >>> ai = DeepSeekAssistant(model="V3 model")
+             >>> print(ai.model)
+             'V3 model'
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://deepseek-assistant.com/api/search-stream-deep-chat-testing.php"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Headers based on the JavaScript code
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "id-ID,id;q=0.9",
+             "cache-control": "no-cache",
+             "content-type": "application/json",
+             "cookie": "click_id=OS3Hz0E1yKfu4YnZNwedESMEdKEgMTzL; organic_user_deepseek_assistant_ch=%7B%22pixel%22%3A%22OS3Hz0E1yKfu4YnZNwedESMEdKEgMTzL%22%2C%22cc%22%3A%22ID%22%2C%22channel%22%3A%22organic_flag%22%7D",
+             "origin": "https://deepseek-assistant.com",
+             **self.fingerprint
+
+         }
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update user-agent header with new fingerprint
+         self.headers.update({
+             "user-agent": self.fingerprint.get("user_agent", self.headers["user-agent"])
+         })
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         return self.fingerprint
+
+     def _parse_chat_response(self, input_text: str) -> str:
+         """
+         Parses the chat response from the API, similar to the JavaScript parseChatResponse method.
+
+         Args:
+             input_text (str): The raw response text from the API
+
+         Returns:
+             str: The parsed content from the response
+         """
+         lines = input_text.strip().split("\n")
+         result = ""
+
+         for line in lines:
+             trimmed_line = line.strip()
+             if trimmed_line.startswith("data: {") and trimmed_line.endswith("}"):
+                 try:
+                     # Extract JSON from the line
+                     json_start = trimmed_line.find("{")
+                     if json_start != -1:
+                         json_str = trimmed_line[json_start:]
+                         parsed_data = json.loads(json_str)
+
+                         # Extract content from the parsed data
+                         content = parsed_data.get("choices", [{}])[0].get("delta", {}).get("content")
+                         if content is not None:
+                             result += content
+                 except (json.JSONDecodeError, KeyError, IndexError):
+                     # Skip malformed JSON or missing keys
+                     continue
+
+         return result.strip()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """
+         Sends a prompt to the DeepSeek Assistant API and returns the response.
+
+         Args:
+             prompt (str): The prompt to send to the API.
+             stream (bool): Whether to stream the response.
+             raw (bool): Whether to return the raw response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to generate the prompt conversationally.
+
+         Returns:
+             Union[Dict[str, Any], Generator]: The API response.
+
+         Examples:
+             >>> ai = DeepSeekAssistant()
+             >>> response = ai.ask("Tell me a joke!")
+             >>> print(response)
+             {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt}, # Add system role
+                 {"role": "user", "content": conversation_prompt}
+             ]
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream to process the response
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     skip_markers=["[DONE]"],
+                     content_extractor=self._deepseek_assistant_extractor,
+                     yield_raw_on_error=False
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+             finally:
+                 # Update history after stream finishes or fails
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(
+                     self.url,
+                     data=json.dumps(payload),
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Parse the response using the custom parser
+                 content = self._parse_chat_response(response.text)
+
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return self.last_response if not raw else content
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Initiates a chat with the DeepSeek Assistant API using the provided prompt.
+
+         Args:
+             prompt (str): The prompt to send to the API.
+             stream (bool): Whether to stream the response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to generate the prompt conversationally.
+
+         Returns:
+             Union[str, Generator[str, None, None]]: The chat response.
+
+         Examples:
+             >>> ai = DeepSeekAssistant()
+             >>> response = ai.chat("Tell me a joke")
+             >>> print(response)
+             'Why did the scarecrow win an award? Because he was outstanding in his field!'
+         """
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         """
+         Extracts the message content from the API response.
+
+         Args:
+             response (dict): The API response.
+
+         Returns:
+             str: The message content.
+
+         Examples:
+             >>> ai = DeepSeekAssistant()
+             >>> response = ai.ask("Tell me a joke!")
+             >>> message = ai.get_message(response)
+             >>> print(message)
+             'Why did the scarecrow win an award? Because he was outstanding in his field!'
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     # Test the provider
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in DeepSeekAssistant.AVAILABLE_MODELS:
+         try:
+             test_ai = DeepSeekAssistant(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
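
The non-streaming path above funnels the whole SSE body through _parse_chat_response, which keeps only complete `data: {...}` lines and concatenates each delta's content. A standalone sketch of that parsing logic (the sample transcript is invented for illustration, not captured from the real endpoint):

    import json

    # Invented sample of the SSE shape the parser expects.
    sample = (
        'data: {"choices": [{"delta": {"content": "Hel"}}]}\n'
        'data: {"choices": [{"delta": {"content": "lo"}}]}\n'
        'data: [DONE]'
    )

    result = ""
    for line in sample.strip().split("\n"):
        line = line.strip()
        # Same guard as the provider: only complete JSON "data:" lines count.
        if line.startswith("data: {") and line.endswith("}"):
            try:
                obj = json.loads(line[line.find("{"):])
                content = obj.get("choices", [{}])[0].get("delta", {}).get("content")
                if content is not None:
                    result += content
            except (json.JSONDecodeError, KeyError, IndexError):
                continue  # skip malformed chunks, as the provider does

    print(result)  # -> Hello  ('data: [DONE]' fails the '}' check and is skipped)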
webscout/Provider/granite.py
@@ -83,10 +83,12 @@ class IBMGranite(Provider):
          self.conversation.history_offset = history_offset

      @staticmethod
-     def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-         """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
-         if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
-             return chunk[1]
+     def _granite_extractor(chunk: Union[str, Dict[str, Any], list]) -> Optional[str]:
+         """Extracts content from IBM Granite stream JSON lists [6, "text"] or [3, "text"]."""
+         # Accept both [3, str] and [6, str] as content chunks
+         if isinstance(chunk, list) and len(chunk) == 2 and isinstance(chunk[1], str):
+             if chunk[0] in (3, 6):
+                 return chunk[1]
          return None

      @staticmethod
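
The widened extractor now treats both [3, "text"] and [6, "text"] frames as content. A quick illustrative check (the chunk values are stand-ins for the JSON lists the Granite stream emits, not captured output):

    def granite_extractor(chunk):
        # Mirrors the new logic: accept [3, str] and [6, str] only.
        if isinstance(chunk, list) and len(chunk) == 2 and isinstance(chunk[1], str):
            if chunk[0] in (3, 6):
                return chunk[1]
        return None

    chunks = [[3, "Hel"], [6, "lo"], [2, "meta"], "noise", [6, 42]]
    print("".join(c for c in map(granite_extractor, chunks) if c))  # -> Hello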
@@ -157,73 +159,60 @@ class IBMGranite(Provider):
              payload["thinking"] = True

          def for_stream():
-             streaming_text = "" # Initialize outside try block
+             streaming_text = ""
              try:
-                 # Use curl_cffi session post with impersonate
                  response = self.session.post(
                      self.api_endpoint,
-                     # headers are set on the session
                      json=payload,
                      stream=True,
                      timeout=self.timeout,
-                     impersonate="chrome110" # Use a common impersonation profile
+                     impersonate="chrome110"
                  )
-                 response.raise_for_status() # Check for HTTP errors
-
-                 # Use sanitize_stream
+                 response.raise_for_status()
                  processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=None), # Pass byte iterator
-                     intro_value=None, # No prefix
-                     to_json=True, # Stream sends JSON lines (which are lists)
-                     content_extractor=self._granite_extractor, # Use the specific extractor
-                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                     data=response.iter_content(chunk_size=None),
+                     intro_value=None,
+                     to_json=True,
+                     content_extractor=self._granite_extractor,
+                     yield_raw_on_error=False,
+                     raw=raw
                  )
-
                  for content_chunk in processed_stream:
-                     # content_chunk is the string extracted by _granite_extractor
-                     if content_chunk and isinstance(content_chunk, str):
-                         streaming_text += content_chunk
-                         resp = dict(text=content_chunk)
-                         yield resp if not raw else content_chunk
-
-                 # Update history after stream finishes
+                     if raw:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             yield content_chunk
+                     else:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             resp = dict(text=content_chunk)
+                             yield resp
                  self.last_response = dict(text=streaming_text)
                  self.conversation.update_chat_history(prompt, streaming_text)
-
-             except CurlError as e: # Catch CurlError
+             except CurlError as e:
                  raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
-             except json.JSONDecodeError as e: # Keep specific JSON error handling
+             except json.JSONDecodeError as e:
                  raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
-             except Exception as e: # Catch other potential exceptions (like HTTPError)
+             except Exception as e:
                  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 # Use specific exception type if available, otherwise generic
                  ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
                  raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e

-
          def for_non_stream():
-             # Aggregate the stream using the updated for_stream logic
              full_text = ""
              try:
-                 # Ensure raw=False so for_stream yields dicts
                  for chunk_data in for_stream():
-                     if isinstance(chunk_data, dict) and "text" in chunk_data:
-                         full_text += chunk_data["text"]
-                     # Handle raw string case if raw=True was passed
-                     elif raw and isinstance(chunk_data, str):
-                         full_text += chunk_data
+                     if raw:
+                         if isinstance(chunk_data, str):
+                             full_text += chunk_data
+                     else:
+                         if isinstance(chunk_data, dict) and "text" in chunk_data:
+                             full_text += chunk_data["text"]
              except Exception as e:
-                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                 if not full_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-             # last_response and history are updated within for_stream
-             # Return the final aggregated response dict or raw string
+                 if not full_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
              return full_text if raw else self.last_response

-
-         # Since the API endpoint suggests streaming, always call the stream generator.
-         # The non-stream wrapper will handle aggregation if stream=False.
          return for_stream() if stream else for_non_stream()

      def chat(
@@ -232,25 +221,27 @@ class IBMGranite(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
+         raw: bool = False,
      ) -> Union[str, Generator[str, None, None]]:
          """Generate response as a string using chat method"""
          def for_stream_chat():
-             # ask() yields dicts or strings when streaming
              gen = self.ask(
-                 prompt, stream=True, raw=False, # Ensure ask yields dicts
+                 prompt, stream=True, raw=raw,
                  optimizer=optimizer, conversationally=conversationally
              )
-             for response_dict in gen:
-                 yield self.get_message(response_dict) # get_message expects dict
-
+             for response in gen:
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
          def for_non_stream_chat():
-             # ask() returns dict or str when not streaming
              response_data = self.ask(
-                 prompt, stream=False, raw=False, # Ensure ask returns dict
+                 prompt, stream=False, raw=raw,
                  optimizer=optimizer, conversationally=conversationally
              )
-             return self.get_message(response_data) # get_message expects dict
-
+             if raw:
+                 return response_data if isinstance(response_data, str) else str(response_data)
+             return self.get_message(response_data)
          return for_stream_chat() if stream else for_non_stream_chat()

      def get_message(self, response: dict) -> str:
@@ -265,6 +256,6 @@ if __name__ == "__main__":
      ai = IBMGranite(
          thinking=True,
      )
-     response = ai.chat("How many r in strawberry", stream=True)
+     response = ai.chat("How many r in strawberry", stream=True, raw=False)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True) # Print each chunk without newline
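
The net effect of the granite.py hunks is a raw= flag threaded from chat() through ask() into sanitize_stream: with raw=True the generator appears to yield plain text chunks straight from the stream, while the default raw=False yields extracted message strings as before. A hedged usage sketch (constructor arguments mirror the __main__ block above; the module path is taken from the files-changed list):

    from webscout.Provider.granite import IBMGranite

    ai = IBMGranite(thinking=True)
    # raw=True streams unprocessed text chunks; raw=False extracts message text.
    for chunk in ai.chat("How many r in strawberry", stream=True, raw=True):
        print(chunk, end="", flush=True)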