webscout-8.2.4-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (80)
  1. webscout/Extra/gguf.py +2 -0
  2. webscout/Provider/AISEARCH/scira_search.py +2 -5
  3. webscout/Provider/Aitopia.py +75 -51
  4. webscout/Provider/AllenAI.py +64 -67
  5. webscout/Provider/ChatGPTClone.py +33 -34
  6. webscout/Provider/ChatSandbox.py +342 -0
  7. webscout/Provider/Cloudflare.py +79 -32
  8. webscout/Provider/Deepinfra.py +69 -56
  9. webscout/Provider/ElectronHub.py +48 -39
  10. webscout/Provider/ExaChat.py +36 -20
  11. webscout/Provider/GPTWeb.py +24 -18
  12. webscout/Provider/GithubChat.py +52 -49
  13. webscout/Provider/GizAI.py +283 -0
  14. webscout/Provider/Glider.py +39 -28
  15. webscout/Provider/Groq.py +48 -20
  16. webscout/Provider/HeckAI.py +18 -36
  17. webscout/Provider/Jadve.py +30 -37
  18. webscout/Provider/LambdaChat.py +36 -59
  19. webscout/Provider/MCPCore.py +18 -21
  20. webscout/Provider/Marcus.py +23 -14
  21. webscout/Provider/Netwrck.py +35 -26
  22. webscout/Provider/OPENAI/__init__.py +1 -1
  23. webscout/Provider/OPENAI/exachat.py +4 -0
  24. webscout/Provider/OPENAI/scirachat.py +2 -4
  25. webscout/Provider/OPENAI/textpollinations.py +20 -22
  26. webscout/Provider/OPENAI/toolbaz.py +1 -0
  27. webscout/Provider/PI.py +22 -13
  28. webscout/Provider/StandardInput.py +42 -30
  29. webscout/Provider/TeachAnything.py +16 -7
  30. webscout/Provider/TextPollinationsAI.py +78 -76
  31. webscout/Provider/TwoAI.py +120 -88
  32. webscout/Provider/TypliAI.py +305 -0
  33. webscout/Provider/Venice.py +24 -22
  34. webscout/Provider/VercelAI.py +31 -12
  35. webscout/Provider/__init__.py +7 -7
  36. webscout/Provider/asksteve.py +53 -44
  37. webscout/Provider/cerebras.py +77 -31
  38. webscout/Provider/chatglm.py +47 -37
  39. webscout/Provider/elmo.py +38 -32
  40. webscout/Provider/granite.py +24 -21
  41. webscout/Provider/hermes.py +27 -20
  42. webscout/Provider/learnfastai.py +25 -20
  43. webscout/Provider/llmchatco.py +48 -78
  44. webscout/Provider/multichat.py +13 -3
  45. webscout/Provider/scira_chat.py +49 -30
  46. webscout/Provider/scnet.py +23 -20
  47. webscout/Provider/searchchat.py +16 -24
  48. webscout/Provider/sonus.py +37 -39
  49. webscout/Provider/toolbaz.py +24 -46
  50. webscout/Provider/turboseek.py +37 -41
  51. webscout/Provider/typefully.py +30 -22
  52. webscout/Provider/typegpt.py +47 -51
  53. webscout/Provider/uncovr.py +46 -40
  54. webscout/cli.py +256 -0
  55. webscout/conversation.py +0 -2
  56. webscout/exceptions.py +3 -0
  57. webscout/version.py +1 -1
  58. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
  59. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
  60. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  61. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  62. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  63. inferno/__init__.py +0 -6
  64. inferno/__main__.py +0 -9
  65. inferno/cli.py +0 -6
  66. inferno/lol.py +0 -589
  67. webscout/Local/__init__.py +0 -12
  68. webscout/Local/__main__.py +0 -9
  69. webscout/Local/api.py +0 -576
  70. webscout/Local/cli.py +0 -516
  71. webscout/Local/config.py +0 -75
  72. webscout/Local/llm.py +0 -287
  73. webscout/Local/model_manager.py +0 -253
  74. webscout/Local/server.py +0 -721
  75. webscout/Local/utils.py +0 -93
  76. webscout/Provider/Chatify.py +0 -175
  77. webscout/Provider/askmyai.py +0 -158
  78. webscout/Provider/gaurish.py +0 -244
  79. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  80. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/HeckAI.py
@@ -120,7 +120,7 @@ class HeckAI(Provider):

         def for_stream():
             streaming_text = "" # Initialize outside try block
-            in_answer = False # Initialize outside try block
+            # in_answer = False # No longer needed
             try:
                 # Use curl_cffi session post with impersonate
                 response = self.session.post(
@@ -132,41 +132,23 @@ class HeckAI(Provider):
                     impersonate="chrome110" # Use a common impersonation profile
                 )
                 response.raise_for_status() # Check for HTTP errors
-
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if not line_bytes:
-                        continue
-
-                    try:
-                        line = line_bytes.decode('utf-8')
-                        # Remove "data: " prefix
-                        if line.startswith("data: "):
-                            data = line[6:]
-                        else:
-                            continue # Skip lines without the prefix
-
-                        # Check for control markers
-                        if data == "[ANSWER_START]":
-                            in_answer = True
-                            continue
-
-                        if data == "[ANSWER_DONE]":
-                            in_answer = False
-                            continue
-
-                        if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
-                            continue
-
-                        # Process content if we're in an answer section
-                        if in_answer:
-                            # Assuming 'data' is the text chunk here
-                            streaming_text += data
-                            resp = dict(text=data)
-                            # Yield dict or raw string chunk
-                            yield resp if not raw else data
-                    except UnicodeDecodeError:
-                        continue # Ignore decoding errors for specific lines
+
+                # Use sanitize_stream to process the stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:", # Prefix to remove *from lines between markers*
+                    to_json=False, # Content is text
+                    start_marker="data: [ANSWER_START]", # Check against the raw line including prefix
+                    end_marker="data: [ANSWER_DONE]", # Check against the raw line including prefix
+                    skip_markers=["[RELATE_Q_START]", "[RELATE_Q_DONE]"], # Skip these if they appear within answer block
+                    yield_raw_on_error=True
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the text between ANSWER_START and ANSWER_DONE
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk

                 # Update history and previous answer after stream finishes
                 self.previous_answer = streaming_text
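
The HeckAI change swaps hand-rolled SSE parsing for the shared sanitize_stream helper, gating output on [ANSWER_START]/[ANSWER_DONE] markers. A minimal standalone sketch of that marker-gated filtering, assuming the semantics the call site implies (the answer_lines helper and demo input below are illustrative, not the webscout.AIutel implementation):

from typing import Iterable, Iterator

def answer_lines(lines: Iterable[bytes]) -> Iterator[str]:
    """Yield SSE payloads seen between [ANSWER_START] and [ANSWER_DONE]."""
    in_answer = False
    for raw in lines:
        line = raw.decode("utf-8", errors="ignore").strip()
        if line == "data: [ANSWER_START]":
            in_answer = True
        elif line == "data: [ANSWER_DONE]":
            in_answer = False
        elif in_answer and line.startswith("data:"):
            payload = line[len("data:"):].strip()
            # Related-question markers can appear inside the answer block; skip them
            if payload and payload not in ("[RELATE_Q_START]", "[RELATE_Q_DONE]"):
                yield payload

demo = [b"data: [ANSWER_START]", b"data: Hello,", b"data: world", b"data: [ANSWER_DONE]"]
print("".join(answer_lines(demo)))  # Hello,world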
webscout/Provider/Jadve.py
@@ -4,7 +4,7 @@ import json
 import re
 from typing import Union, Any, Dict, Optional, Generator

-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -97,6 +97,17 @@ class JadveOpenAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _jadve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Jadve stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -153,41 +164,22 @@ class JadveOpenAI(Provider):
            )
            response.raise_for_status() # Check for HTTP errors

-           # Pattern to match the streaming chunks format: 0:"text"
-           pattern = r'0:"(.*?)"'
-           buffer = ""
-
-           # Iterate over bytes and decode manually
-           for line_bytes in response.iter_lines():
-               if not line_bytes:
-                   continue
-
-               try:
-                   line = line_bytes.decode('utf-8')
-                   buffer += line
-
-                   # Try to match chunks in the current buffer
-                   matches = re.findall(pattern, buffer)
-                   if matches:
-                       for chunk in matches:
-                           # Handle potential escape sequences like \\n
-                           decoded_chunk = chunk.encode().decode('unicode_escape')
-                           full_response_text += decoded_chunk
-                           resp = {"text": decoded_chunk}
-                           # Yield dict or raw string chunk
-                           yield resp if not raw else decoded_chunk
-
-                       # Remove matched parts from the buffer
-                       # Be careful with buffer modification during iteration if issues arise
-                       matched_parts = [f'0:"{match}"' for match in matches]
-                       for part in matched_parts:
-                           buffer = buffer.replace(part, '', 1)
-
-                   # Check if we've reached the end of the response
-                   if 'e:' in line or 'd:' in line:
-                       break
-               except UnicodeDecodeError:
-                   continue # Ignore decoding errors for specific lines
+           # Use sanitize_stream
+           processed_stream = sanitize_stream(
+               data=response.iter_content(chunk_size=None), # Pass byte iterator
+               intro_value=None, # No simple prefix
+               to_json=False, # Content is text after extraction
+               content_extractor=self._jadve_extractor, # Use the specific extractor
+               # end_marker="e:", # Add if 'e:' reliably marks the end
+               yield_raw_on_error=True
+           )
+
+           for content_chunk in processed_stream:
+               # content_chunk is the string extracted by _jadve_extractor
+               if content_chunk and isinstance(content_chunk, str):
+                   full_response_text += content_chunk
+                   resp = {"text": content_chunk}
+                   yield resp if not raw else content_chunk

           # Update history after stream finishes
           self.last_response = {"text": full_response_text}
@@ -272,7 +264,8 @@ class JadveOpenAI(Provider):
            str: Extracted text.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
-       return response["text"]
+       # Extractor handles formatting
+       return response.get("text", "")

 if __name__ == "__main__":
     # Ensure curl_cffi is installed
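
The new _jadve_extractor pulls text out of Vercel-AI-style '0:"..."' chunks and can be exercised in isolation. A standalone copy with made-up sample chunks (the function name and inputs below are illustrative; only the body is taken from the diff):

import re
from typing import Any, Dict, Optional, Union

def jadve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Copy of JadveOpenAI._jadve_extractor for standalone testing."""
    if isinstance(chunk, str):
        match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
        if match:
            # Decode \uXXXX escapes, then unescape quotes and backslashes
            content = match.group(1).encode().decode('unicode_escape')
            return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

print(jadve_extractor('0:"Caf\\u00e9 open"'))       # Café open
print(jadve_extractor('e:{"finishReason":"stop"}'))  # None (no 0:"..." payload)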
webscout/Provider/LambdaChat.py
@@ -7,8 +7,8 @@ import re
 import uuid
 from typing import Any, Dict, List, Optional, Union, Generator

-from webscout.AIutel import Conversation
-from webscout.AIbase import Provider
+from webscout.AIutel import Conversation, sanitize_stream
+from webscout.AIbase import Provider # Import sanitize_stream
 from webscout import exceptions
 from webscout.litagent import LitAgent

@@ -182,62 +182,21 @@ class LambdaChat(Provider):
         boundary += "".join(random.choice(boundary_chars) for _ in range(16))
         return boundary

-    def process_response(self, response, prompt: str):
-        """Process streaming response and extract content."""
-        full_text = ""
-        sources = None
+    @staticmethod
+    def _lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from LambdaChat stream JSON objects."""
+        if not isinstance(chunk, dict) or "type" not in chunk:
+            return None
+
         reasoning_text = ""
-        has_reasoning = False
-
-        for line in response.iter_lines(decode_unicode=True):
-            if not line:
-                continue
-
-            try:
-                # Parse each line as JSON
-                data = json.loads(line)
-
-                # Handle different response types
-                if "type" not in data:
-                    continue
-
-                if data["type"] == "stream" and "token" in data:
-                    token = data["token"].replace("\u0000", "")
-                    full_text += token
-                    resp = {"text": token}
-                    yield resp
-                elif data["type"] == "finalAnswer":
-                    final_text = data.get("text", "")
-                    if final_text and not full_text:
-                        full_text = final_text
-                        resp = {"text": final_text}
-                        yield resp
-                elif data["type"] == "webSearch" and "sources" in data:
-                    sources = data["sources"]
-                elif data["type"] == "reasoning":
-                    has_reasoning = True
-                    if data.get("subtype") == "stream" and "token" in data:
-                        reasoning_text += data["token"]
-
-                # If we have reasoning, prepend it to the next text output
-                if reasoning_text and not full_text:
-                    resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
-                    yield resp
-
-            except json.JSONDecodeError:
-                continue
-
-        # Update conversation history only for saving to file if needed
-        if full_text and self.conversation.file:
-            if has_reasoning:
-                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
-                self.last_response = {"text": full_text_with_reasoning}
-                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
-            else:
-                self.last_response = {"text": full_text}
-                self.conversation.update_chat_history(prompt, full_text)
-
-        return full_text
+        if chunk["type"] == "stream" and "token" in chunk:
+            return chunk["token"].replace("\u0000", "")
+        elif chunk["type"] == "finalAnswer":
+            return chunk.get("text")
+        elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
+            # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
+            return chunk["token"] # Or potentially format as f"<think>{chunk['token']}</think>"
+        return None

     def ask(
         self,
@@ -296,6 +255,7 @@ class LambdaChat(Provider):
         multipart_headers["Content-Length"] = str(len(body))

         def for_stream():
+            streaming_text = "" # Initialize for history
             try:
                 # Try with multipart/form-data first
                 response = None
@@ -327,8 +287,21 @@ class LambdaChat(Provider):

                 response.raise_for_status() # Check status after potential fallback

-                # Process the streaming response
-                yield from self.process_response(response, prompt)
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines
+                    content_extractor=self._lambdachat_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _lambdachat_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk # Aggregate text for history
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk

             except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
                 # Handle specific exceptions if needed
@@ -353,6 +326,10 @@ class LambdaChat(Provider):
                 # If we get here, all models failed
                 raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e

+            # Update history after stream finishes
+            if streaming_text and self.conversation.file:
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)

         def for_non_stream():
             # Aggregate the stream using the updated for_stream logic
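
LambdaChat's stream is newline-delimited JSON events, and the new static extractor maps each event to its text (or None). Mirrored below against hand-written events to show which types contribute output; the sample events are illustrative, not captured traffic:

import json
from typing import Any, Dict, Optional, Union

def lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Mirror of the new LambdaChat._lambdachat_extractor."""
    if not isinstance(chunk, dict) or "type" not in chunk:
        return None
    if chunk["type"] == "stream" and "token" in chunk:
        return chunk["token"].replace("\u0000", "")
    if chunk["type"] == "finalAnswer":
        return chunk.get("text")
    if chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
        return chunk["token"]
    return None

lines = [
    '{"type": "stream", "token": "Hel"}',
    '{"type": "webSearch", "sources": []}',  # ignored: extractor returns None
    '{"type": "stream", "token": "lo"}',
]
out = [lambdachat_extractor(json.loads(line)) for line in lines]
print([t for t in out if t])  # ['Hel', 'lo']

Note one behavioral difference that survives the rewrite: reasoning tokens are now interleaved with answer text rather than wrapped in <think> tags, as the in-code comments themselves acknowledge, and the old webSearch sources handling is dropped.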
webscout/Provider/MCPCore.py
@@ -8,7 +8,7 @@ from curl_cffi import CurlError

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -146,11 +146,11 @@ class MCPCore(Provider):
             )
             return cookie_string, token
         except FileNotFoundError:
-            raise exceptions.InvalidAuthenticationError(
+            raise exceptions.FailedToGenerateResponseError(
                 f"Error: Cookies file not found at {self.cookies_path}!"
             )
         except json.JSONDecodeError:
-            raise exceptions.InvalidAuthenticationError(
+            raise exceptions.FailedToGenerateResponseError(
                 f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
             )

@@ -207,24 +207,21 @@ class MCPCore(Provider):
                )
                response.raise_for_status()

-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            line = line_bytes.decode('utf-8').strip()
-                            if line.startswith("data: "):
-                                json_str = line[6:]
-                                if json_str == "[DONE]":
-                                    break
-                                json_data = json.loads(json_str)
-                                if 'choices' in json_data and len(json_data['choices']) > 0:
-                                    delta = json_data['choices'][0].get('delta', {})
-                                    content = delta.get('content')
-                                    if content:
-                                        streaming_text += content
-                                        resp = dict(text=content)
-                                        yield resp if not raw else content
-                        except (json.JSONDecodeError, UnicodeDecodeError):
-                            continue
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by the content_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
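
MCPCore now parses a standard OpenAI-style SSE stream, with an inline lambda walking choices[0].delta.content. The same walk, spelled out as a named function on a hand-written chunk (delta_content and the sample chunks are illustrative):

from typing import Any, Optional

def delta_content(chunk: Any) -> Optional[str]:
    """The choices[0].delta.content walk used by MCPCore's extractor lambda."""
    if not isinstance(chunk, dict):
        return None
    return chunk.get('choices', [{}])[0].get('delta', {}).get('content')

print(delta_content({"choices": [{"delta": {"content": "Hi"}}]}))  # Hi
print(delta_content({"choices": [{"delta": {}}]}))                 # None
print(delta_content("[DONE]"))                                     # None

As written, both this sketch and the in-tree lambda would raise IndexError on a chunk whose choices list is empty; presumably yield_raw_on_error=False makes sanitize_stream swallow such extractor failures, though that is an assumption about the helper's behavior.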
webscout/Provider/Marcus.py
@@ -5,7 +5,7 @@ from typing import Union, Any, Dict, Optional, Generator

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions

@@ -103,18 +103,18 @@ class Marcus(Provider):
                )
                response.raise_for_status() # Check for HTTP errors

-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            decoded_line = line_bytes.decode('utf-8')
-                            streaming_text += decoded_line # Aggregate text
-                            resp = {"text": decoded_line}
-                            # Yield dict or raw string chunk
-                            yield resp if not raw else decoded_line
-                        except UnicodeDecodeError:
-                            continue # Ignore decoding errors
+                # Use sanitize_stream to decode bytes and yield text chunks
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=False, # It's plain text
+                    yield_raw_on_error=True
+                )

+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk # Aggregate text
+                        yield {"text": content_chunk} if not raw else content_chunk
                # Update history after stream finishes
                self.last_response = {"text": streaming_text} # Store aggregated text
                self.conversation.update_chat_history(
@@ -140,8 +140,17 @@ class Marcus(Provider):
                )
                response.raise_for_status() # Check for HTTP errors

-                # Use response.text which is already decoded
-                full_response = response.text
+                response_text_raw = response.text # Get raw text
+
+                # Process the text using sanitize_stream (even though it's not streaming)
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    intro_value=None, # No prefix
+                    to_json=False # It's plain text
+                )
+                # Aggregate the single result
+                full_response = "".join(list(processed_stream))
+
                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                # Return dict or raw string
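
For Marcus, sanitize_stream is configured with no prefix, no JSON mode, and no extractor, so in the streaming path it acts purely as a byte-to-text decoder, and in the non-stream path joining its output should reproduce response.text unchanged. A rough equivalent of that configuration (assumed semantics, inferred only from the call sites):

from typing import Iterable, Iterator, Union

def passthrough(data: Union[str, Iterable[bytes]]) -> Iterator[str]:
    """What sanitize_stream(intro_value=None, to_json=False) appears to reduce to here."""
    if isinstance(data, str):
        yield data
        return
    for chunk in data:
        text = chunk.decode("utf-8", errors="ignore")
        if text:
            yield text

print("".join(passthrough([b"Hello ", b"world"])))  # Hello world
print("".join(passthrough("already decoded")))      # already decoded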
webscout/Provider/Netwrck.py
@@ -1,10 +1,5 @@
-import time
-import uuid
-import json
 from typing import Any, Dict, Optional, Generator, Union
-from dataclasses import dataclass, asdict
-from datetime import date
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -95,6 +90,15 @@ class Netwrck(Provider):
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

+    @staticmethod
+    def _netwrck_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Removes surrounding quotes and handles potential escapes."""
+        if isinstance(chunk, str):
+            text = chunk.strip('"')
+            # Handle potential unicode escapes if they appear
+            # text = text.encode().decode('unicode_escape') # Uncomment if needed
+            return text
+        return None
     def ask(
         self,
         prompt: str,
@@ -136,21 +140,18 @@ class Netwrck(Provider):
                response.raise_for_status() # Check for HTTP errors

                streaming_text = ""
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            decoded_line = line_bytes.decode('utf-8').strip('"')
-                            # Handle potential escape sequences if necessary
-                            # decoded_line = decoded_line.encode().decode('unicode_escape') # Uncomment if needed
-                            streaming_text += decoded_line
-                            resp = {"text": decoded_line}
-                            # Yield dict or raw string
-                            yield resp if not raw else decoded_line
-                        except UnicodeDecodeError:
-                            # Handle potential decoding errors if chunks split mid-character
-                            continue
-
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=False, # It's text
+                    content_extractor=self._netwrck_extractor, # Use the quote stripper
+                    yield_raw_on_error=True
+                )
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk} if not raw else content_chunk

                # Update history after stream finishes
                self.last_response = {"text": streaming_text} # Store aggregated text
                self.conversation.update_chat_history(payload["query"], streaming_text)
@@ -174,11 +175,19 @@ class Netwrck(Provider):
                )
                response.raise_for_status() # Check for HTTP errors

-                # Use response.text which is already decoded
-                text = response.text.strip('"')
-                # Handle potential escape sequences if necessary
-                # text = text.encode().decode('unicode_escape') # Uncomment if needed
-                self.last_response = {"text": text}
+                response_text_raw = response.text # Get raw text
+
+                # Process the text using sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._netwrck_extractor
+                )
+                # Aggregate the single result
+                text = "".join(list(processed_stream))
+
+                self.last_response = {"text": text} # Store processed text
                self.conversation.update_chat_history(prompt, text)

                # Return dict or raw string
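
Netwrck streams quoted text fragments, and the new extractor only strips the surrounding double quotes (the unicode-escape pass stays commented out, as before). Standalone, with made-up chunks:

from typing import Any, Dict, Optional, Union

def netwrck_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Mirror of the new Netwrck._netwrck_extractor."""
    if isinstance(chunk, str):
        return chunk.strip('"')
    return None

print(netwrck_extractor('"Once upon a time"'))   # Once upon a time
print(netwrck_extractor({"unexpected": True}))   # None

Worth noting: str.strip('"') removes every leading and trailing quote character, not just one matched pair, so a legitimate quote that lands at a chunk boundary inside the text would also be dropped.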
webscout/Provider/OPENAI/__init__.py
@@ -25,4 +25,4 @@ from .textpollinations import *
 from .e2b import *
 from .multichat import * # Add MultiChatAI
 from .ai4chat import * # Add AI4Chat
-from .mcpcore import *
+from .mcpcore import *
webscout/Provider/OPENAI/exachat.py
@@ -35,6 +35,8 @@ MODEL_CONFIGS = {
            "gemini-2.0-flash-thinking-exp-01-21",
            "gemini-2.5-pro-exp-03-25",
            "gemini-2.0-pro-exp-02-05",
+            "gemini-2.5-flash-preview-04-17",
+

        ],
    },
@@ -83,6 +85,7 @@ MODEL_CONFIGS = {
    },
}

+
class Completions(BaseCompletions):
    def __init__(self, client: 'ExaChat'):
        self._client = client
@@ -292,6 +295,7 @@ class ExaChat(OpenAICompatibleProvider):
        "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.5-pro-exp-03-25",
        "gemini-2.0-pro-exp-02-05",
+        "gemini-2.5-flash-preview-04-17",

        # OpenRouter Models
        "mistralai/mistral-small-3.1-24b-instruct:free",
webscout/Provider/OPENAI/scirachat.py
@@ -324,15 +324,13 @@ class SciraChat(OpenAICompatibleProvider):
    """

    AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3-mini": "Grok3-mini", # thinking model
+        "scira-default": "Grok3-mini", # thinking model
+        "scira-grok-3": "Grok3",
        "scira-vision" : "Grok2-Vision", # vision model
        "scira-4.1-mini": "GPT4.1-mini",
        "scira-qwq": "QWQ-32B",
        "scira-o4-mini": "o4-mini",
        "scira-google": "gemini 2.5 flash"
-
-
    }

    def __init__(
webscout/Provider/OPENAI/textpollinations.py
@@ -268,28 +268,26 @@ class TextPollinations(OpenAICompatibleProvider):
    """

    AVAILABLE_MODELS = [
-        "openai", # OpenAI GPT-4.1-nano (Azure) - vision capable
-        "openai-large", # OpenAI GPT-4.1 mini (Azure) - vision capable
-        "openai-reasoning", # OpenAI o4-mini (Azure) - vision capable, reasoning
-        "qwen-coder", # Qwen 2.5 Coder 32B (Scaleway)
-        "llama", # Llama 3.3 70B (Cloudflare)
-        "llamascout", # Llama 4 Scout 17B (Cloudflare)
-        "mistral", # Mistral Small 3 (Scaleway) - vision capable
-        "unity", # Unity Mistral Large (Scaleway) - vision capable, uncensored
-        "midijourney", # Midijourney (Azure)
-        "rtist", # Rtist (Azure)
-        "searchgpt", # SearchGPT (Azure) - vision capable
-        "evil", # Evil (Scaleway) - vision capable, uncensored
-        "deepseek-reasoning", # DeepSeek-R1 Distill Qwen 32B (Cloudflare) - reasoning
-        "deepseek-reasoning-large", # DeepSeek R1 - Llama 70B (Scaleway) - reasoning
-        "phi", # Phi-4 Instruct (Cloudflare) - vision and audio capable
-        "llama-vision", # Llama 3.2 11B Vision (Cloudflare) - vision capable
-        "gemini", # gemini-2.5-flash-preview-04-17 (Azure) - vision and audio capable
-        "hormoz", # Hormoz 8b (Modal)
-        "hypnosis-tracy", # Hypnosis Tracy 7B (Azure) - audio capable
-        "deepseek", # DeepSeek-V3 (DeepSeek)
-        "sur", # Sur AI Assistant (Mistral) (Scaleway) - vision capable
-        "openai-audio", # OpenAI GPT-4o-audio-preview (Azure) - vision and audio capable
+        "openai",
+        "openai-large",
+        "qwen-coder",
+        "llama",
+        "llamascout",
+        "mistral",
+        "unity",
+        "midijourney",
+        "rtist",
+        "searchgpt",
+        "evil",
+        "deepseek-reasoning",
+        "deepseek-reasoning-large",
+        "phi",
+        "llama-vision",
+        "hormoz",
+        "hypnosis-tracy",
+        "deepseek",
+        "sur",
+        "openai-audio",
    ]

    def __init__(
webscout/Provider/OPENAI/toolbaz.py
@@ -288,6 +288,7 @@ class Toolbaz(OpenAICompatibleProvider):
        "gemini-2.0-flash-thinking",
        "gemini-2.0-flash",
        "gemini-1.5-flash",
+        "o3-mini",
        "gpt-4o-latest",
        "gpt-4o",
        "deepseek-r1",