webscout: 8.2.4-py3-none-any.whl → 8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (80)
  1. webscout/Extra/gguf.py +2 -0
  2. webscout/Provider/AISEARCH/scira_search.py +2 -5
  3. webscout/Provider/Aitopia.py +75 -51
  4. webscout/Provider/AllenAI.py +64 -67
  5. webscout/Provider/ChatGPTClone.py +33 -34
  6. webscout/Provider/ChatSandbox.py +342 -0
  7. webscout/Provider/Cloudflare.py +79 -32
  8. webscout/Provider/Deepinfra.py +69 -56
  9. webscout/Provider/ElectronHub.py +48 -39
  10. webscout/Provider/ExaChat.py +36 -20
  11. webscout/Provider/GPTWeb.py +24 -18
  12. webscout/Provider/GithubChat.py +52 -49
  13. webscout/Provider/GizAI.py +283 -0
  14. webscout/Provider/Glider.py +39 -28
  15. webscout/Provider/Groq.py +48 -20
  16. webscout/Provider/HeckAI.py +18 -36
  17. webscout/Provider/Jadve.py +30 -37
  18. webscout/Provider/LambdaChat.py +36 -59
  19. webscout/Provider/MCPCore.py +18 -21
  20. webscout/Provider/Marcus.py +23 -14
  21. webscout/Provider/Netwrck.py +35 -26
  22. webscout/Provider/OPENAI/__init__.py +1 -1
  23. webscout/Provider/OPENAI/exachat.py +4 -0
  24. webscout/Provider/OPENAI/scirachat.py +2 -4
  25. webscout/Provider/OPENAI/textpollinations.py +20 -22
  26. webscout/Provider/OPENAI/toolbaz.py +1 -0
  27. webscout/Provider/PI.py +22 -13
  28. webscout/Provider/StandardInput.py +42 -30
  29. webscout/Provider/TeachAnything.py +16 -7
  30. webscout/Provider/TextPollinationsAI.py +78 -76
  31. webscout/Provider/TwoAI.py +120 -88
  32. webscout/Provider/TypliAI.py +305 -0
  33. webscout/Provider/Venice.py +24 -22
  34. webscout/Provider/VercelAI.py +31 -12
  35. webscout/Provider/__init__.py +7 -7
  36. webscout/Provider/asksteve.py +53 -44
  37. webscout/Provider/cerebras.py +77 -31
  38. webscout/Provider/chatglm.py +47 -37
  39. webscout/Provider/elmo.py +38 -32
  40. webscout/Provider/granite.py +24 -21
  41. webscout/Provider/hermes.py +27 -20
  42. webscout/Provider/learnfastai.py +25 -20
  43. webscout/Provider/llmchatco.py +48 -78
  44. webscout/Provider/multichat.py +13 -3
  45. webscout/Provider/scira_chat.py +49 -30
  46. webscout/Provider/scnet.py +23 -20
  47. webscout/Provider/searchchat.py +16 -24
  48. webscout/Provider/sonus.py +37 -39
  49. webscout/Provider/toolbaz.py +24 -46
  50. webscout/Provider/turboseek.py +37 -41
  51. webscout/Provider/typefully.py +30 -22
  52. webscout/Provider/typegpt.py +47 -51
  53. webscout/Provider/uncovr.py +46 -40
  54. webscout/cli.py +256 -0
  55. webscout/conversation.py +0 -2
  56. webscout/exceptions.py +3 -0
  57. webscout/version.py +1 -1
  58. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
  59. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
  60. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  61. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  62. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  63. inferno/__init__.py +0 -6
  64. inferno/__main__.py +0 -9
  65. inferno/cli.py +0 -6
  66. inferno/lol.py +0 -589
  67. webscout/Local/__init__.py +0 -12
  68. webscout/Local/__main__.py +0 -9
  69. webscout/Local/api.py +0 -576
  70. webscout/Local/cli.py +0 -516
  71. webscout/Local/config.py +0 -75
  72. webscout/Local/llm.py +0 -287
  73. webscout/Local/model_manager.py +0 -253
  74. webscout/Local/server.py +0 -721
  75. webscout/Local/utils.py +0 -93
  76. webscout/Provider/Chatify.py +0 -175
  77. webscout/Provider/askmyai.py +0 -158
  78. webscout/Provider/gaurish.py +0 -244
  79. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  80. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/chatglm.py CHANGED
@@ -1,11 +1,12 @@
- import requests
+ from curl_cffi import CurlError
+ from curl_cffi.requests import Session
  import json
  from typing import Any, Dict, Optional, Generator, List, Union
  import uuid

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent
@@ -29,7 +30,7 @@ class ChatGLM(Provider):
          plus_model: bool = True,
      ):
          """Initializes the ChatGLM API client."""
-         self.session = requests.Session()
+         self.session = Session() # Use curl_cffi Session
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
@@ -55,7 +56,7 @@ class ChatGLM(Provider):
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
          self.session.headers.update(self.headers)
-         Conversation.intro = (
+         Conversation.intro = ( # type: ignore
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
              )
@@ -66,7 +67,16 @@ class ChatGLM(Provider):
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
+         self.session.proxies = proxies # Assign proxies directly
+
+     @staticmethod
+     def _chatglm_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from ChatGLM stream JSON objects."""
+         if isinstance(chunk, dict):
+             parts = chunk.get('parts', [])
+             if parts and isinstance(parts[0].get('content'), list) and parts[0]['content']:
+                 return parts[0]['content'][0].get('text')
+         return None

      def ask(
          self,
@@ -119,45 +129,45 @@ class ChatGLM(Provider):
          }

          def for_stream():
+             streaming_text = "" # Initialize outside try block
+             last_processed_content = "" # Track the last processed content
              try:
-                 with self.session.post(
-                     self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-                 ) as response:
-                     response.raise_for_status()
-
-                     streaming_text = ""
-                     last_processed_content = "" # Track the last processed content
-                     for chunk in response.iter_lines():
-                         if chunk:
-                             decoded_chunk = chunk.decode('utf-8')
-                             if decoded_chunk.startswith('data: '):
-                                 try:
-                                     json_data = json.loads(decoded_chunk[6:])
-                                     parts = json_data.get('parts', [])
-                                     if parts:
-                                         content = parts[0].get('content', [])
-                                         if content:
-                                             text = content[0].get('text', '')
-                                             new_text = text[len(last_processed_content):]
-                                             if new_text: # Check for new content
-                                                 streaming_text += new_text
-                                                 last_processed_content = text
-                                                 yield new_text if raw else dict(text=new_text)
-                                 except json.JSONDecodeError:
-                                     continue
+                 response = self.session.post(
+                     self.api_endpoint, json=payload, stream=True, timeout=self.timeout,
+                     impersonate="chrome120" # Add impersonate
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True, # Stream sends JSON
+                     content_extractor=self._chatglm_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )

+                 for current_full_text in processed_stream:
+                     # current_full_text is the full text extracted by _chatglm_extractor
+                     if current_full_text and isinstance(current_full_text, str):
+                         new_text = current_full_text[len(last_processed_content):]
+                         if new_text: # Check for new content
+                             streaming_text += new_text
+                             last_processed_content = current_full_text # Update tracker
+                             yield new_text if raw else dict(text=new_text)
+
+             except CurlError as e:
+                 raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
+             finally:
+                 # Update history after stream finishes or fails
+                 if streaming_text:
                      self.last_response.update(dict(text=streaming_text))
                      self.conversation.update_chat_history(
                          prompt, self.get_message(self.last_response)
                      )

-             except requests.exceptions.RequestException as e:
-                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
-             except json.JSONDecodeError as e:
-                 raise exceptions.InvalidResponseError(f"Failed to decode JSON: {e}")
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
-
          def for_non_stream():
              for _ in for_stream():
                  pass
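
Every provider touched in this release follows the same refactor: the hand-rolled response.iter_lines() parsing loops are replaced by sanitize_stream from webscout.AIutel plus a small per-provider content_extractor callback. The diff only shows the call sites, not sanitize_stream itself; the sketch below is a hypothetical reconstruction inferred from the parameters used above (intro_value, to_json, skip_markers, content_extractor, yield_raw_on_error), and the real webscout.AIutel implementation may differ.

    # Hypothetical sanitize_stream-style helper, inferred only from the call
    # sites in this diff; not the actual webscout.AIutel implementation.
    import json
    from typing import Any, Callable, Generator, Iterable, List, Optional

    def _lines(data: Iterable[bytes]) -> Generator[bytes, None, None]:
        """Re-chunk an arbitrary byte stream into newline-delimited lines."""
        buffer = b""
        for chunk in data:
            buffer += chunk
            while b"\n" in buffer:
                line, buffer = buffer.split(b"\n", 1)
                yield line
        if buffer:
            yield buffer

    def sanitize_stream(
        data: Iterable[bytes],
        intro_value: Optional[str] = None,        # line prefix to strip, e.g. "data:"
        to_json: bool = False,                    # parse each line as JSON
        skip_markers: Optional[List[str]] = None, # sentinel lines, e.g. ["[DONE]"]
        content_extractor: Optional[Callable[[Any], Optional[str]]] = None,
        yield_raw_on_error: bool = False,         # yield the raw line if JSON parsing fails
    ) -> Generator[Any, None, None]:
        for raw_line in _lines(data):
            line = raw_line.decode("utf-8", errors="ignore").strip()
            if not line or line in (skip_markers or []):
                continue
            if intro_value and line.startswith(intro_value):
                line = line[len(intro_value):].strip()
            payload: Any = line
            if to_json:
                try:
                    payload = json.loads(line)
                except json.JSONDecodeError:
                    if yield_raw_on_error:
                        yield line
                    continue
            if content_extractor:
                extracted = content_extractor(payload)
                if extracted is not None:
                    yield extracted
            else:
                yield payload

Under this reading, each provider only supplies the content_extractor; prefix stripping, JSON decoding, and error skipping live in one place. Note that ChatGLM's extractor returns the full accumulated text of the reply, so its loop above still has to diff against last_processed_content, whereas the other providers below receive true deltas.
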
webscout/Provider/elmo.py CHANGED
@@ -1,13 +1,14 @@
  from curl_cffi.requests import Session
  from curl_cffi import CurlError
  import json
- from typing import Union, Any, Dict, Generator
+ from typing import Optional, Union, Any, Dict, Generator
  from webscout import exceptions
  from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout.litagent import LitAgent
+ import re # Import re for the extractor


  class Elmo(Provider):
@@ -84,6 +85,17 @@ class Elmo(Provider):
          )
          self.conversation.history_offset = history_offset

+     @staticmethod
+     def _elmo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from the Elmo stream format '0:"..."'."""
+         if isinstance(chunk, str):
+             match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+             if match:
+                 # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                 content = match.group(1).encode().decode('unicode_escape')
+                 return content.replace('\\\\', '\\').replace('\\"', '"')
+         return None
+
      def ask(
          self,
          prompt: str,
@@ -144,7 +156,7 @@
          }

          def for_stream():
-             full_response = "" # Initialize outside try block
+             streaming_text = "" # Initialize outside try block
              try:
                  # Use curl_cffi session post with impersonate
                  # Note: The API expects 'text/plain' but we send JSON.
@@ -159,40 +171,32 @@
                  )
                  response.raise_for_status() # Check for HTTP errors

-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8')
-                             if line.startswith('0:'):
-                                 # Extract content after '0:"' and before the closing '"'
-                                 match = line.split(':"', 1)
-                                 if len(match) > 1:
-                                     chunk = match[1]
-                                     if chunk.endswith('"'):
-                                         chunk = chunk[:-1] # Remove trailing quote
-
-                                     # Handle potential escape sequences like \\n
-                                     formatted_output = chunk.encode().decode('unicode_escape')
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No simple prefix
+                     to_json=False, # Content is text after extraction
+                     content_extractor=self._elmo_extractor, # Use the specific extractor
+                     yield_raw_on_error=True
+                 )

-                                     if formatted_output: # Ensure content is not None or empty
-                                         full_response += formatted_output
-                                         resp = dict(text=formatted_output)
-                                         # Yield dict or raw string chunk
-                                         yield resp if not raw else formatted_output
-                         except (UnicodeDecodeError, IndexError):
-                             continue # Ignore lines that cannot be decoded or parsed
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk

-                 # Update history after stream finishes
-                 self.last_response = dict(text=full_response)
-                 self.conversation.update_chat_history(
-                     prompt, full_response
-                 )
              except CurlError as e: # Catch CurlError
                  raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
              except Exception as e: # Catch other potential exceptions (like HTTPError)
                  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                  raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+             finally:
+                 # Update history after stream finishes
+                 self.last_response = dict(text=streaming_text)
+                 self.conversation.update_chat_history(
+                     prompt, streaming_text
+                 )

          def for_non_stream():
              # Aggregate the stream using the updated for_stream logic
@@ -210,7 +214,9 @@ class Elmo(Provider):
                  if not collected_text:
                      raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

-             # last_response and history are updated within for_stream
+             # Update last_response and history *after* aggregation for non-stream
+             self.last_response = {"text": collected_text}
+             self.conversation.update_chat_history(prompt, collected_text)
              # Return the final aggregated response dict or raw string
              return collected_text if raw else self.last_response
@@ -265,7 +271,7 @@ class Elmo(Provider):
              str: Message extracted
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
+         return response.get("text", "") # Use .get for safety


  if __name__ == "__main__":
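
The regex-based _elmo_extractor above can be exercised in isolation. A minimal sketch; the sample chunks are invented for illustration and are not captured Elmo traffic:

    import re
    from typing import Optional

    def elmo_extractor(chunk: str) -> Optional[str]:
        # Same logic as _elmo_extractor above: capture the payload of 0:"..." frames.
        match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
        if match:
            content = match.group(1).encode().decode('unicode_escape')
            return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    print(elmo_extractor('0:"Hello"'))            # -> Hello
    print(elmo_extractor('0:"caf\\u00e9",'))      # -> café (unicode escape decoded)
    print(elmo_extractor('f:{"messageId":"x"}'))  # -> None (non-content frame skipped)

This also shows why the call site passes to_json=False and yield_raw_on_error=True: the frames are not JSON, and the extractor itself decides which lines carry text.
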
webscout/Provider/granite.py CHANGED
@@ -1,9 +1,9 @@
  from curl_cffi.requests import Session
  from curl_cffi import CurlError
  import json
- from typing import Union, Any, Dict, Generator
+ from typing import Optional, Union, Any, Dict, Generator

- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent as Lit
@@ -77,6 +77,13 @@ class IBMGranite(Provider):
          self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
          self.conversation.history_offset = history_offset

+     @staticmethod
+     def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
+         if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
+             return chunk[1]
+         return None
+
      def ask(
          self,
          prompt: str,
@@ -127,25 +134,21 @@ class IBMGranite(Provider):
                  )
                  response.raise_for_status() # Check for HTTP errors

-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8')
-                             data = json.loads(line)
-                             # Check the specific format [3, "text_chunk"]
-                             if isinstance(data, list) and len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
-                                 content = data[1]
-                                 if content: # Ensure content is not None or empty
-                                     streaming_text += content
-                                     resp = dict(text=content)
-                                     # Yield dict or raw string chunk
-                                     yield resp if not raw else content
-                             else:
-                                 # Skip unrecognized lines/formats
-                                 pass
-                         except (json.JSONDecodeError, UnicodeDecodeError):
-                             continue # Ignore lines that are not valid JSON or cannot be decoded
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No prefix
+                     to_json=True, # Stream sends JSON lines (which are lists)
+                     content_extractor=self._granite_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _granite_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk

                  # Update history after stream finishes
                  self.last_response = dict(text=streaming_text)
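
Granite streams line-delimited JSON arrays rather than SSE, which is why this call site passes intro_value=None and to_json=True and lets the extractor keep only [3, "..."] frames. A standalone sketch with invented frames (only the tag-3 shape is attested by the code above; the other tags are made up):

    import json
    from typing import Any, Optional

    def granite_extractor(chunk: Any) -> Optional[str]:
        # Same check as _granite_extractor above: only [3, "text"] frames carry text.
        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
            return chunk[1]
        return None

    frames = ['[0, "meta"]', '[3, "Hello "]', '[3, "world"]', '[7, 42]']
    pieces = (granite_extractor(json.loads(f)) for f in frames)
    print("".join(p for p in pieces if p))  # -> Hello world
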
webscout/Provider/hermes.py CHANGED
@@ -4,7 +4,7 @@ import json
  from typing import Union, Any, Dict, Generator, Optional

  from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
@@ -102,6 +102,13 @@ class NousHermes(Provider):
              print(f"Warning: Error loading cookies: {e}")
          return None

+     @staticmethod
+     def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from Hermes stream JSON objects."""
+         if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+             return chunk.get('content')
+         return None
+

      def ask(
          self,
@@ -145,36 +152,36 @@ class NousHermes(Provider):
              "top_p": self.top_p,
          }
          def for_stream():
-             full_response = ""
+             streaming_text = "" # Initialize outside try block
              try:
                  response = self.session.post(
                      self.api_endpoint,
                      json=payload,
                      stream=True,
                      timeout=self.timeout,
-                     impersonate="chrome110"
+                     impersonate="chrome110" # Keep impersonate
                  )
                  response.raise_for_status()

-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             decoded_line = line_bytes.decode('utf-8')
-                             if decoded_line.startswith('data: '):
-                                 data_str = decoded_line.replace('data: ', '', 1)
-                                 data = json.loads(data_str)
-                                 if data.get('type') == 'llm_response':
-                                     content = data.get('content', '')
-                                     if content:
-                                         full_response += content
-                                         resp = dict(text=content)
-                                         yield resp if not raw else content
-                         except (json.JSONDecodeError, UnicodeDecodeError):
-                             continue
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True, # Stream sends JSON
+                     content_extractor=self._hermes_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _hermes_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk

-                 self.last_response = dict(text=full_response)
+                 self.last_response = dict(text=streaming_text) # Use streaming_text
                  self.conversation.update_chat_history(
-                     prompt, full_response
+                     prompt, streaming_text # Use streaming_text
                  )

              except CurlError as e:
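
Hermes streams SSE-style data: lines, so the prefix is stripped before JSON decoding and the extractor keeps only chunks with type == 'llm_response'. A minimal sketch of that path with invented payloads (the 'status' frame type is an assumption):

    import json
    from typing import Any, Optional

    def hermes_extractor(chunk: Any) -> Optional[str]:
        # Same check as _hermes_extractor above.
        if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
            return chunk.get('content')
        return None

    lines = [
        'data: {"type": "llm_response", "content": "Hi"}',
        'data: {"type": "status", "content": "thinking"}',
    ]
    for line in lines:
        payload = json.loads(line[len('data:'):].strip())  # strip the SSE prefix
        piece = hermes_extractor(payload)
        if piece:
            print(piece)  # -> Hi (the status frame is filtered out)
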
webscout/Provider/learnfastai.py CHANGED
@@ -1,12 +1,12 @@
  import os
  import json
- from typing import Optional, Union, Generator
+ from typing import Any, Dict, Optional, Union, Generator
  import uuid
  from curl_cffi.requests import Session
  from curl_cffi import CurlError

  from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
@@ -79,6 +79,13 @@ class LearnFast(Provider):
          )
          self.conversation.history_offset = history_offset

+     @staticmethod
+     def _learnfast_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts message content from LearnFast stream JSON objects."""
+         if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
+             return chunk['data'].get('message')
+         return None
+
      def generate_unique_id(self) -> str:
          """Generate a 32-character hexadecimal unique ID."""
          return uuid.uuid4().hex
@@ -209,24 +216,22 @@
                  )
                  response.raise_for_status() # Check for HTTP errors

-                 # Process the streamed response
-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8').strip()
-                             if line == "[DONE]":
-                                 break
-                             json_response = json.loads(line)
-                             if json_response.get('code') == 200 and json_response.get('data'):
-                                 message = json_response['data'].get('message', '')
-                                 if message:
-                                     full_response += message
-                                     resp = {"text": message}
-                                     # Yield dict or raw string chunk
-                                     yield resp if not raw else message
-                         except (json.JSONDecodeError, UnicodeDecodeError):
-                             pass # Ignore lines that are not valid JSON or cannot be decoded
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No prefix
+                     to_json=True, # Stream sends JSON lines
+                     skip_markers=["[DONE]"],
+                     content_extractor=self._learnfast_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _learnfast_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         full_response += content_chunk
+                         resp = {"text": content_chunk}
+                         yield resp if not raw else content_chunk

                  # Update history after stream finishes
                  self.last_response = {"text": full_response}
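
LearnFast emits line-delimited JSON objects and terminates with a bare [DONE] sentinel, which the call above handles via skip_markers=["[DONE]"]. A standalone sketch with invented frames (the code-500 error frame is an assumption):

    import json
    from typing import Any, Optional

    def learnfast_extractor(chunk: Any) -> Optional[str]:
        # Same check as _learnfast_extractor above.
        if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
            return chunk['data'].get('message')
        return None

    stream = [
        '{"code": 200, "data": {"message": "Hello"}}',
        '{"code": 500, "data": null}',
        '[DONE]',
    ]
    pieces = []
    for line in stream:
        if line == "[DONE]":  # mirrors skip_markers=["[DONE]"] above
            break
        pieces.append(learnfast_extractor(json.loads(line)))
    print([p for p in pieces if p])  # -> ['Hello'] (the error frame yields None)
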