webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/copilot.py
@@ -112,10 +112,18 @@ class Copilot(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        images = None,
+        images=None,
         api_key: str = None,
         **kwargs
     ) -> Union[Dict[str, Any], Generator]:
+        """
+        Enhanced Copilot.ask with:
+        - return_conversation support
+        - multiple image upload
+        - event dispatch for websocket events
+        - suggested followups and metadata
+        - improved error handling
+        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -125,7 +133,33 @@ class Copilot(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-        # Main logic for calling Copilot API
+        def handle_event(msg, state):
+            event = msg.get("event")
+            if event == "appendText":
+                state["is_started"] = True
+                content = msg.get("text")
+                state["streaming_text"] += content
+                resp = {"text": content}
+                return resp if raw else resp
+            elif event == "generatingImage":
+                state["image_prompt"] = msg.get("prompt")
+            elif event == "imageGenerated":
+                return {"type": "image", "url": msg.get("url"), "prompt": state.get("image_prompt"), "preview": msg.get("thumbnailUrl")}
+            elif event == "done":
+                state["done"] = True
+            elif event == "suggestedFollowups":
+                return {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
+            elif event == "replaceText":
+                content = msg.get("text")
+                state["streaming_text"] += content
+                resp = {"text": content}
+                return resp if raw else resp
+            elif event == "error":
+                raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
+            elif event not in ["received", "startMessage", "citation", "partCompleted"]:
+                pass
+            return None
+
         def for_stream():
             try:
                 if not has_curl_cffi:
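The refactor above folds the old inline if/elif chain into handle_event, which mutates a shared state dict and returns a chunk for the caller to yield (or None for bookkeeping events). A minimal standalone sketch of the same pattern, with the websocket stubbed out by a list of already-parsed messages; all names here are illustrative, not part of the package:

# Minimal sketch of the state-dict event dispatch used above; the
# websocket is replaced by a plain list of parsed messages.
def handle_event(msg, state):
    event = msg.get("event")
    if event == "appendText":
        state["streaming_text"] += msg.get("text", "")
        return {"text": msg.get("text", "")}
    if event == "done":
        state["done"] = True
    return None  # nothing to yield for bookkeeping events

def consume(messages):
    state = {"streaming_text": "", "done": False}
    for msg in messages:
        if state["done"]:
            break
        chunk = handle_event(msg, state)
        if chunk is not None:
            yield chunk

fake_stream = [{"event": "appendText", "text": "Hel"},
               {"event": "appendText", "text": "lo"},
               {"event": "done"}]
print(list(consume(fake_stream)))  # [{'text': 'Hel'}, {'text': 'lo'}]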
@@ -133,15 +167,15 @@ class Copilot(Provider):
 
                 websocket_url = self.websocket_url
                 headers = None
-
-                if images is not None:
+
+                # Auth logic (token/cookies)
+                if images is not None or api_key is not None:
                     if api_key is not None:
                         self._access_token = api_key
                     if self._access_token is None:
                         try:
                             self._access_token, self._cookies = readHAR(self.url)
                         except NoValidHarFileError as h:
-                            # print(f"Copilot: {h}")
                             if has_nodriver:
                                 yield {"type": "login", "provider": self.label, "url": os.environ.get("webscout_login", "")}
                                 self._access_token, self._cookies = asyncio.run(get_access_token_and_cookies(self.url, self.proxies.get("https")))
@@ -159,7 +193,7 @@ class Copilot(Provider):
                 ) as session:
                     if self._access_token is not None:
                         self._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
-
+
                     response = session.get(f"{self.url}/c/api/user")
                     if response.status_code == 401:
                         raise exceptions.AuthenticationError("Status 401: Invalid access token")
@@ -168,9 +202,8 @@ class Copilot(Provider):
                     user = response.json().get('firstName')
                     if user is None:
                         self._access_token = None
-                    # print(f"Copilot: User: {user or 'null'}")
 
-                    # Create or use existing conversation
+                    # Conversation management
                     conversation = kwargs.get("conversation", None)
                     if conversation is None:
                         response = session.post(self.conversation_url)
@@ -180,30 +213,26 @@ class Copilot(Provider):
                         conversation = CopilotConversation(conversation_id)
                         if kwargs.get("return_conversation", False):
                             yield conversation
-                        # print(f"Copilot: Created conversation: {conversation_id}")
                     else:
                         conversation_id = conversation.conversation_id
-                        # print(f"Copilot: Use conversation: {conversation_id}")
 
-                    # Handle image uploads if any
+                    # Multiple image upload
                     uploaded_images = []
                     if images is not None:
-                        for image, _ in images:
+                        for image_tuple in images:
+                            image = image_tuple[0] if isinstance(image_tuple, (tuple, list)) else image_tuple
                             # Convert image to bytes if needed
                             if isinstance(image, str):
                                 if image.startswith("data:"):
-                                    # Data URL
                                     header, encoded = image.split(",", 1)
                                     data = base64.b64decode(encoded)
                                 else:
-                                    # File path or URL
                                     with open(image, "rb") as f:
                                         data = f.read()
                             else:
                                 data = image
-
                             # Get content type
-                            content_type = "image/jpeg" # Default
+                            content_type = "image/jpeg"
                             if data[:2] == b'\xff\xd8':
                                 content_type = "image/jpeg"
                             elif data[:8] == b'\x89PNG\r\n\x1a\n':
@@ -212,7 +241,6 @@ class Copilot(Provider):
                                 content_type = "image/gif"
                             elif data[:2] in (b'BM', b'BA'):
                                 content_type = "image/bmp"
-
                             response = session.post(
                                 f"{self.url}/c/api/attachments",
                                 headers={"content-type": content_type},
@@ -220,12 +248,11 @@ class Copilot(Provider):
                             )
                             if response.status_code != 200:
                                 raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
-                            uploaded_images.append({"type":"image", "url": response.json().get("url")})
-                            break
+                            uploaded_images.append({"type": "image", "url": response.json().get("url")})
 
-                    # Connect to WebSocket
+                    # WebSocket connection
                     wss = session.ws_connect(websocket_url)
-                    wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
+                    wss.send(json.dumps({"event": "setOptions", "supportedCards": ["weather", "local", "image", "sports", "video", "ads", "finance"], "ads": {"supportedTypes": ["multimedia", "product", "tourActivity", "propertyPromotion", "text"]}}))
                     wss.send(json.dumps({
                         "event": "send",
                         "conversationId": conversation_id,
@@ -236,56 +263,26 @@ class Copilot(Provider):
                         "mode": "reasoning" if "Think" in self.model else "chat"
                     }).encode(), CurlWsFlag.TEXT)
 
-                    # Process response
-                    is_started = False
-                    msg = None
-                    image_prompt: str = None
+                    # Event-driven response loop
+                    state = {"is_started": False, "image_prompt": None, "done": False, "streaming_text": ""}
                     last_msg = None
-                    streaming_text = ""
-
                     try:
-                        while True:
+                        while not state["done"]:
                             try:
                                 msg = wss.recv()[0]
                                 msg = json.loads(msg)
-                            except:
+                            except Exception:
                                 break
                             last_msg = msg
-                            if msg.get("event") == "appendText":
-                                is_started = True
-                                content = msg.get("text")
-                                streaming_text += content
-                                resp = {"text": content}
-                                yield resp if raw else resp
-                            elif msg.get("event") == "generatingImage":
-                                image_prompt = msg.get("prompt")
-                            elif msg.get("event") == "imageGenerated":
-                                yield {"type": "image", "url": msg.get("url"), "prompt": image_prompt, "preview": msg.get("thumbnailUrl")}
-                            elif msg.get("event") == "done":
-                                break
-                            elif msg.get("event") == "suggestedFollowups":
-                                yield {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
-                                break
-                            elif msg.get("event") == "replaceText":
-                                content = msg.get("text")
-                                streaming_text += content
-                                resp = {"text": content}
-                                yield resp if raw else resp
-                            elif msg.get("event") == "error":
-                                raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
-                            elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
-                                print(f"Copilot Message: {msg}")
-
-                        if not is_started:
+                            result = handle_event(msg, state)
+                            if result is not None:
+                                yield result
+                        if not state["is_started"]:
                             raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
-
-                        # Update conversation history
-                        self.conversation.update_chat_history(prompt, streaming_text)
-                        self.last_response = {"text": streaming_text}
-
+                        self.conversation.update_chat_history(prompt, state["streaming_text"])
+                        self.last_response = {"text": state["streaming_text"]}
                     finally:
                         wss.close()
-
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
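Because the rewritten loop yields {"text": ...} dicts for text deltas and typed dicts for image and followup events, a caller has to branch on chunk shape. A hedged usage sketch; the Copilot constructor, the import path, and the stream keyword are assumed from the surrounding provider code rather than shown in this diff:

# Hypothetical consumer of the event-driven generator above.
from webscout.Provider.copilot import Copilot  # import path assumed

bot = Copilot()
for chunk in bot.ask("Draw a cat", stream=True):
    if isinstance(chunk, dict) and chunk.get("type") == "image":
        print("image url:", chunk["url"])
    elif isinstance(chunk, dict) and chunk.get("type") == "suggested_followups":
        print("followups:", chunk["suggestions"])
    elif isinstance(chunk, dict) and "text" in chunk:
        print(chunk["text"], end="", flush=True)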
webscout/Provider/freeaichat.py
@@ -2,6 +2,8 @@ import re
 import requests
 import json
 import uuid
+import random
+import string
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
@@ -18,70 +20,74 @@ class FreeAIChat(Provider):
 
     AVAILABLE_MODELS = [
         # OpenAI Models
+        "Deepseek R1 Latest",
         "GPT 4o",
-        "GPT 4.5 Preview",
-        "GPT 4o Latest",
-        "GPT 4o mini",
-        "GPT 4o Search Preview",
-        "O1",
-        "O1 Mini",
-        "O3 Mini",
-        "O3 Mini High",
-        "O3 Mini Low",
         "O4 Mini",
         "O4 Mini High",
+        "QwQ Plus",
+        "Llama 4 Maverick",
+        "Grok 3",
+        "GPT 4o mini",
+        "Deepseek v3 0324",
+        "Grok 3 Mini",
         "GPT 4.1",
-        "o3",
         "GPT 4.1 Mini",
-
-
-        # Anthropic Models
-        "Claude 3.5 haiku",
-        "claude 3.5 sonnet",
-        "Claude 3.7 Sonnet",
         "Claude 3.7 Sonnet (Thinking)",
-
-        # Deepseek Models
-        "Deepseek R1",
-        "Deepseek R1 Fast",
-        "Deepseek V3",
-        "Deepseek v3 0324",
-
-        # Google Models
-        "Gemini 1.5 Flash",
-        "Gemini 1.5 Pro",
-        "Gemini 2.0 Flash",
-        "Gemini 2.0 Pro",
-        "Gemini 2.5 Pro",
-
-        # Llama Models
-        "Llama 3.1 405B",
-        "Llama 3.1 70B Fast",
-        "Llama 3.3 70B",
-        "Llama 3.2 90B Vision",
         "Llama 4 Scout",
-        "Llama 4 Maverick",
-
-        # Mistral Models
-        "Mistral Large",
-        "Mistral Nemo",
-        "Mixtral 8x22B",
-
-        # Qwen Models
-        "Qwen Max",
-        "Qwen Plus",
-        "Qwen Turbo",
-        "QwQ 32B",
-        "QwQ Plus",
-
-        # XAI Models
-        "Grok 2",
-        "Grok 3",
+        "O3 High",
+        "Gemini 2.5 Pro",
+        "Magistral Medium 2506",
+        "O3",
+        "Gemini 2.5 Flash",
+        "Qwen 3 235B A22B",
+        "Claude 4 Sonnet",
+        "Claude 4 Sonnet (Thinking)",
+        "Claude 4 Opus",
+        "Claude 4 Opus (Thinking)",
+        "Google: Gemini 2.5 Pro (thinking)",
     ]
 
+    def _auto_fetch_api_key(self, proxies=None, timeout=30):
+        """
+        Automatically register a new user and fetch an API key from FreeAIChat Playground.
+        """
+        session = requests.Session()
+        if proxies:
+            session.proxies.update(proxies)
+        def random_email():
+            user = ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+            return f"{user}@bltiwd.com"
+        email = random_email()
+        payload = {"email": email, "password": email}
+        headers = {
+            'User-Agent': LitAgent().random(),
+            'Accept': '*/*',
+            'Content-Type': 'application/json',
+            'Origin': 'https://freeaichatplayground.com',
+            'Referer': 'https://freeaichatplayground.com/register',
+        }
+        try:
+            resp = session.post(
+                "https://freeaichatplayground.com/api/v1/auth/register",
+                headers=headers,
+                json=payload,
+                timeout=timeout
+            )
+            if resp.status_code == 201:
+                data = resp.json()
+                apikey = data.get("user", {}).get("apikey")
+                if apikey:
+                    return apikey
+                else:
+                    raise exceptions.FailedToGenerateResponseError("API key not found in registration response.")
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Registration failed: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"API key auto-fetch failed: {e}")
+
     def __init__(
         self,
-        api_key: str,
+        api_key: str = None,
         is_conversation: bool = True,
         max_tokens: int = 150,
         timeout: int = 30,
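With _auto_fetch_api_key in place, the provider becomes usable without credentials: a throwaway mailbox is registered at freeaichatplayground.com and the returned key is stored on the instance. A hedged usage sketch; the import path is assumed from the file layout, and the constructor defaults come from the signature shown in this diff:

# Hypothetical zero-config construction; __init__ (below) falls back to
# _auto_fetch_api_key() when api_key is omitted.
from webscout.Provider.freeaichat import FreeAIChat  # import path assumed

bot = FreeAIChat(model="GPT 4o")   # registers a throwaway account
print(bool(bot.api_key))           # True once registration succeeds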
@@ -95,7 +101,7 @@ class FreeAIChat(Provider):
         system_prompt: str = "You are a helpful AI assistant.",
         temperature: float = 0.7,
     ):
-        """Initializes the FreeAIChat API client."""
+        """Initializes the FreeAIChat API client. If api_key is not provided, auto-register and fetch one."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -120,7 +126,10 @@ class FreeAIChat(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.temperature = temperature
-        self.api_key = api_key
+        if not api_key:
+            self.api_key = self._auto_fetch_api_key(proxies=proxies, timeout=timeout)
+        else:
+            self.api_key = api_key
 
         self.__available_optimizers = (
             method
webscout/Provider/granite.py
@@ -83,10 +83,12 @@ class IBMGranite(Provider):
         self.conversation.history_offset = history_offset
 
     @staticmethod
-    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
-        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
-            return chunk[1]
+    def _granite_extractor(chunk: Union[str, Dict[str, Any], list]) -> Optional[str]:
+        """Extracts content from IBM Granite stream JSON lists [6, "text"] or [3, "text"]."""
+        # Accept both [3, str] and [6, str] as content chunks
+        if isinstance(chunk, list) and len(chunk) == 2 and isinstance(chunk[1], str):
+            if chunk[0] in (3, 6):
+                return chunk[1]
         return None
 
     @staticmethod
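The widened extractor now accepts any two-element list whose first element is 3 or 6 and whose second is a string. A standalone restatement of that acceptance rule with quick checks:

# Standalone re-statement of the extractor's acceptance rule.
def granite_extractor(chunk):
    if isinstance(chunk, list) and len(chunk) == 2 and isinstance(chunk[1], str):
        if chunk[0] in (3, 6):
            return chunk[1]
    return None

assert granite_extractor([3, "Hello"]) == "Hello"    # old-style chunk
assert granite_extractor([6, " world"]) == " world"  # new-style chunk
assert granite_extractor([1, "meta"]) is None        # other opcodes skipped
assert granite_extractor("not a list") is None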
@@ -157,73 +159,60 @@ class IBMGranite(Provider):
             payload["thinking"] = True
 
         def for_stream():
-            streaming_text = "" # Initialize outside try block
+            streaming_text = ""
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
-                    # headers are set on the session
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-                response.raise_for_status() # Check for HTTP errors
-
-                # Use sanitize_stream
+                response.raise_for_status()
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No prefix
-                    to_json=True, # Stream sends JSON lines (which are lists)
-                    content_extractor=self._granite_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=True,
+                    content_extractor=self._granite_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _granite_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        resp = dict(text=content_chunk)
-                        yield resp if not raw else content_chunk
-
-                # Update history after stream finishes
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            resp = dict(text=content_chunk)
+                            yield resp
                 self.last_response = dict(text=streaming_text)
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
-            except json.JSONDecodeError as e: # Keep specific JSON error handling
+            except json.JSONDecodeError as e:
                 raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                # Use specific exception type if available, otherwise generic
                 ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
                 raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        full_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        full_text += chunk_data
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            full_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            full_text += chunk_data["text"]
             except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not full_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-            # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
            return full_text if raw else self.last_response
 
-
-        # Since the API endpoint suggests streaming, always call the stream generator.
-        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
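for_non_stream simply drains for_stream and aggregates according to the same raw flag, so the two code paths cannot drift apart. A minimal sketch of this stream-or-aggregate pattern, independent of the provider and with illustrative names:

# Minimal sketch of the stream-or-aggregate pattern used above.
def make_stream(raw):
    for piece in ("straw", "berry"):
        yield piece if raw else {"text": piece}

def ask(stream=False, raw=False):
    gen = make_stream(raw)
    if stream:
        return gen                          # caller iterates chunks
    parts = [c if raw else c["text"] for c in gen]
    full = "".join(parts)
    return full if raw else {"text": full}  # aggregated result

print(ask(raw=True))   # strawberry
print(ask())           # {'text': 'strawberry'}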
@@ -232,25 +221,27 @@ class IBMGranite(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string using chat method"""
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
-
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_data) # get_message expects dict
-
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
+            return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
@@ -265,6 +256,6 @@ if __name__ == "__main__":
     ai = IBMGranite(
         thinking=True,
     )
-    response = ai.chat("How many r in strawberry", stream=True)
+    response = ai.chat("How many r in strawberry", stream=True, raw=False)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)  # Print each chunk without newline
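For completeness, the same demo with raw=True would print unprocessed text deltas instead of get_message output; a hedged variant of the __main__ block above, assuming no other setup is required:

# Hypothetical raw-mode variant of the demo above.
ai = IBMGranite(thinking=True)
for chunk in ai.chat("How many r in strawberry", stream=True, raw=True):
    print(chunk, end="", flush=True)  # raw text deltas straight from the stream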