webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (79)
  1. webscout/AIutel.py +53 -800
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/__init__.py +11 -10
  4. webscout/Provider/AISEARCH/felo_search.py +7 -3
  5. webscout/Provider/AISEARCH/scira_search.py +26 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  7. webscout/Provider/Deepinfra.py +81 -57
  8. webscout/Provider/ExaChat.py +9 -5
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/LambdaChat.py +39 -31
  14. webscout/Provider/Netwrck.py +5 -8
  15. webscout/Provider/OLLAMA.py +8 -9
  16. webscout/Provider/OPENAI/README.md +1 -1
  17. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  18. webscout/Provider/OPENAI/TwoAI.py +94 -1
  19. webscout/Provider/OPENAI/__init__.py +1 -3
  20. webscout/Provider/OPENAI/autoproxy.py +1 -1
  21. webscout/Provider/OPENAI/copilot.py +73 -26
  22. webscout/Provider/OPENAI/deepinfra.py +60 -24
  23. webscout/Provider/OPENAI/exachat.py +9 -5
  24. webscout/Provider/OPENAI/monochat.py +3 -3
  25. webscout/Provider/OPENAI/netwrck.py +4 -7
  26. webscout/Provider/OPENAI/qodo.py +630 -0
  27. webscout/Provider/OPENAI/scirachat.py +86 -49
  28. webscout/Provider/OPENAI/textpollinations.py +19 -14
  29. webscout/Provider/OPENAI/venice.py +1 -0
  30. webscout/Provider/Perplexitylabs.py +163 -147
  31. webscout/Provider/Qodo.py +478 -0
  32. webscout/Provider/TTI/__init__.py +1 -0
  33. webscout/Provider/TTI/monochat.py +3 -3
  34. webscout/Provider/TTI/together.py +7 -6
  35. webscout/Provider/TTI/venice.py +368 -0
  36. webscout/Provider/TextPollinationsAI.py +19 -14
  37. webscout/Provider/TogetherAI.py +57 -44
  38. webscout/Provider/TwoAI.py +96 -2
  39. webscout/Provider/TypliAI.py +33 -27
  40. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  41. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  42. webscout/Provider/Venice.py +1 -0
  43. webscout/Provider/WiseCat.py +18 -20
  44. webscout/Provider/__init__.py +4 -10
  45. webscout/Provider/copilot.py +58 -61
  46. webscout/Provider/freeaichat.py +64 -55
  47. webscout/Provider/monochat.py +275 -0
  48. webscout/Provider/scira_chat.py +115 -21
  49. webscout/Provider/toolbaz.py +5 -10
  50. webscout/Provider/typefully.py +1 -11
  51. webscout/Provider/x0gpt.py +325 -315
  52. webscout/__init__.py +4 -11
  53. webscout/auth/__init__.py +19 -4
  54. webscout/auth/api_key_manager.py +189 -189
  55. webscout/auth/auth_system.py +25 -40
  56. webscout/auth/config.py +105 -6
  57. webscout/auth/database.py +377 -22
  58. webscout/auth/models.py +185 -130
  59. webscout/auth/request_processing.py +175 -11
  60. webscout/auth/routes.py +119 -5
  61. webscout/auth/server.py +9 -2
  62. webscout/auth/simple_logger.py +236 -0
  63. webscout/sanitize.py +1074 -0
  64. webscout/version.py +1 -1
  65. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
  66. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
  67. webscout/Provider/AI21.py +0 -177
  68. webscout/Provider/HuggingFaceChat.py +0 -469
  69. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  70. webscout/Provider/OPENAI/freeaichat.py +0 -363
  71. webscout/Provider/OPENAI/typegpt.py +0 -368
  72. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  73. webscout/Provider/WritingMate.py +0 -273
  74. webscout/Provider/typegpt.py +0 -284
  75. webscout/Provider/uncovr.py +0 -333
  76. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  77. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  78. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  79. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/TypliAI.py

@@ -33,7 +33,7 @@ class TypliAI(Provider):
     >>> print(response)
     'I don't have access to real-time weather information...'
     """
-    AVAILABLE_MODELS = ["free-no-sign-up-chatgpt"]
+    AVAILABLE_MODELS = ["gpt-4o-mini"]
 
     def __init__(
         self,
@@ -47,7 +47,7 @@ class TypliAI(Provider):
         history_offset: int = 10250,
         act: str = None,
         system_prompt: str = "You are a helpful assistant.",
-        model: str = "free-no-sign-up-chatgpt"
+        model: str = "gpt-4o-mini"
     ):
         """
         Initializes the TypliAI API with given parameters.
@@ -119,16 +119,6 @@ class TypliAI(Provider):
         self.conversation.history_offset = history_offset
 
 
-    @staticmethod
-    def _typli_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from the Typli.ai stream format '0:"..."'."""
-        if isinstance(chunk, str):
-            match = re.search(r'0:"(.*?)"', chunk)
-            if match:
-                # Decode potential unicode escapes like \u00e9
-                content = match.group(1).encode().decode('unicode_escape')
-                return content.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes and quotes
-        return None
 
     def ask(
         self,
@@ -182,7 +172,7 @@ class TypliAI(Provider):
                     ]
                 }
             ],
-            "slug": self.model
+            "slug": "free-no-sign-up-chatgpt"
         }
 
         def for_stream():
@@ -202,13 +192,21 @@ class TypliAI(Provider):
                 raise exceptions.FailedToGenerateResponseError(error_msg)
 
             streaming_response = ""
-            # Use sanitize_stream with the custom extractor
+            # Use sanitize_stream with extract_regexes
            processed_stream = sanitize_stream(
                 data=response.iter_content(chunk_size=None),  # Pass byte iterator
                 intro_value=None,  # No simple prefix like 'data:'
                 to_json=False,  # Content is extracted as string, not JSON object per line
-                content_extractor=self._typli_extractor,  # Use the specific extractor
-                skip_markers=["f:{", "e:{", "d:{", "8:[", "2:["]  # Skip metadata lines based on observed format
+                extract_regexes=[r'0:"(.*?)"'],  # Extract content from '0:"..."' format
+                skip_regexes=[
+                    r'^f:\{.*\}$',  # Skip metadata lines starting with f:{
+                    r'^e:\{.*\}$',  # Skip metadata lines starting with e:{
+                    r'^d:\{.*\}$',  # Skip metadata lines starting with d:{
+                    r'^8:\[.*\]$',  # Skip metadata lines starting with 8:[
+                    r'^2:\[.*\]$',  # Skip metadata lines starting with 2:[
+                    r'^\s*$'  # Skip empty lines
+                ],
+                raw=raw  # Pass the raw parameter to sanitize_stream
             )
 
             for content_chunk in processed_stream:
@@ -244,6 +242,7 @@ class TypliAI(Provider):
         self,
         prompt: str,
         stream: bool = False,
+        raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
@@ -253,6 +252,7 @@ class TypliAI(Provider):
         Args:
             prompt (str): The prompt to send to the API.
             stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
             optimizer (str): Optimizer to use for the prompt.
             conversationally (bool): Whether to generate the prompt conversationally.
 
@@ -262,19 +262,25 @@ class TypliAI(Provider):
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
 
         return for_stream() if stream else for_non_stream()
 
@@ -290,7 +296,7 @@ class TypliAI(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         # Ensure text exists before processing
-        return response.get("text", "")
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
 
 
@@ -298,7 +304,7 @@ if __name__ == "__main__":
     from rich import print
     try:
         ai = TypliAI(timeout=60)
-        response = ai.chat("Write a short poem about AI", stream=True)
+        response = ai.chat("Write a short poem about AI", stream=True, raw=False)
         for chunk in response:
             print(chunk, end="", flush=True)
     except Exception as e:
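
The TypliAI hunks above thread a new raw flag from chat() through ask() and into sanitize_stream, so callers can choose between extracted message text and unprocessed stream chunks. A minimal sketch of the call-site difference, assuming webscout 8.3.5 is installed (the exact chunk shape in raw mode depends on what sanitize_stream yields):

    # Minimal sketch, assuming webscout 8.3.5 is installed.
    from webscout import TypliAI

    ai = TypliAI(timeout=60)

    # Default mode: chunks pass through get_message(), so plain text arrives.
    for chunk in ai.chat("Write a short poem about AI", stream=True, raw=False):
        print(chunk, end="", flush=True)

    # Raw mode: chunks skip get_message(); useful for inspecting the wire
    # format (the 0:"..." lines matched by extract_regexes=[r'0:"(.*?)"']).
    for chunk in ai.chat("Write a short poem about AI", stream=True, raw=True):
        print(repr(chunk))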
webscout/Provider/UNFINISHED/PERPLEXED_search.py (new file)

@@ -0,0 +1,254 @@
+import requests
+import json
+from typing import Any, Dict, Generator, Optional, Union
+
+from webscout.AIbase import AISearch, SearchResponse
+from webscout import exceptions
+from webscout.litagent import LitAgent
+from webscout.AIutel import sanitize_stream
+
+
+class PERPLEXED(AISearch):
+    """A class to interact with the PERPLEXED stream search API.
+
+    PERPLEXED provides an AI-powered search interface that returns emotionally intelligent
+    responses based on web content. It supports both streaming and non-streaming responses.
+
+    Basic Usage:
+        >>> from webscout import PERPLEXED
+        >>> ai = PERPLEXED()
+        >>> # Non-streaming example
+        >>> response = ai.search("What is Python?")
+        >>> print(response)
+        Python is a high-level programming language...
+
+        >>> # Streaming example
+        >>> for chunk in ai.search("Tell me about AI", stream=True):
+        ...     print(chunk, end="", flush=True)
+        Artificial Intelligence is...
+
+        >>> # Raw response format
+        >>> for chunk in ai.search("Hello", stream=True, raw=True):
+        ...     print(chunk)
+        {'text': 'Hello'}
+        {'text': ' there!'}
+
+    Args:
+        timeout (int, optional): Request timeout in seconds. Defaults to 30.
+        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+    Attributes:
+        api_endpoint (str): The PERPLEXED API endpoint URL.
+        stream_chunk_size (int): Size of chunks when streaming responses.
+        timeout (int): Request timeout in seconds.
+        headers (dict): HTTP headers used in requests.
+    """
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: Optional[dict] = None,
+    ):
+        """Initialize the PERPLEXED API client.
+
+        Args:
+            timeout (int, optional): Request timeout in seconds. Defaults to 30.
+            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+        Example:
+            >>> ai = PERPLEXED(timeout=60)  # Longer timeout
+            >>> ai = PERPLEXED(proxies={'http': 'http://proxy.com:8080'})  # With proxy
+        """
+        self.session = requests.Session()
+        self.api_endpoint = "https://d21l5c617zttgr.cloudfront.net/stream_search"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://d37ozmhmvu2kcg.cloudfront.net",
+            "referer": "https://d37ozmhmvu2kcg.cloudfront.net/",
+            "sec-ch-ua": '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "sec-gpc": "1",
+            "user-agent": LitAgent().random()
+        }
+        self.session.headers.update(self.headers)
+        self.proxies = proxies
+
+    def search(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+    ) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
+        """Search using the PERPLEXED API and get AI-generated responses.
+
+        This method sends a search query to PERPLEXED and returns the AI-generated response.
+        It supports both streaming and non-streaming modes, as well as raw response format.
+
+        Args:
+            prompt (str): The search query or prompt to send to the API.
+            stream (bool, optional): If True, yields response chunks as they arrive.
+                If False, returns complete response. Defaults to False.
+            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                If False, returns SearchResponse objects that convert to text automatically.
+                Defaults to False.
+
+        Returns:
+            Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
+                - If stream=False: Returns complete response as SearchResponse object
+                - If stream=True: Yields response chunks as either Dict or SearchResponse objects
+
+        Raises:
+            APIConnectionError: If the API request fails
+
+        Examples:
+            Basic search:
+            >>> ai = PERPLEXED()
+            >>> response = ai.search("What is Python?")
+            >>> print(response)
+            Python is a programming language...
+
+            Streaming response:
+            >>> for chunk in ai.search("Tell me about AI", stream=True):
+            ...     print(chunk, end="")
+            Artificial Intelligence...
+
+            Raw response format:
+            >>> for chunk in ai.search("Hello", stream=True, raw=True):
+            ...     print(chunk)
+            {'text': 'Hello'}
+            {'text': ' there!'}
+
+            Error handling:
+            >>> try:
+            ...     response = ai.search("My question")
+            ... except exceptions.APIConnectionError as e:
+            ...     print(f"API error: {e}")
+        """
+        payload = {
+            "user_prompt": prompt
+        }
+
+        def extract_answer_content(data):
+            """Extract answer content from PERPLEXED response."""
+            print(f"[DEBUG] extract_answer_content received: {type(data)}")
+            if isinstance(data, dict):
+                print(f"[DEBUG] Dict keys: {list(data.keys())}")
+                print(f"[DEBUG] success: {data.get('success')}")
+                print(f"[DEBUG] stage: {data.get('stage')}")
+                print(f"[DEBUG] answer present: {'answer' in data}")
+                answer_val = data.get('answer', 'NOT_FOUND')
+                print(f"[DEBUG] answer value: {repr(answer_val[:100] if isinstance(answer_val, str) and len(answer_val) > 100 else answer_val)}")
+
+                # Check if this is the final answer - answer field exists and is not empty
+                if data.get("success") and "answer" in data and data["answer"]:
+                    print(f"[DEBUG] Returning answer content (length: {len(data['answer'])})")
+                    return data["answer"]
+                # Check if this is a stage update with no answer yet
+                elif data.get("success") and data.get("stage"):
+                    print(f"[DEBUG] Skipping stage update: {data.get('stage')}")
+                    return None  # Skip stage updates without answers
+                else:
+                    print(f"[DEBUG] No matching condition, returning None")
+            else:
+                print(f"[DEBUG] Data is not dict, returning None")
+            return None
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    proxies=self.proxies,
+                ) as response:
+                    if not response.ok:
+                        raise exceptions.APIConnectionError(
+                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                        )
+
+                    # Process the streaming response manually
+                    full_response = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            full_response += line
+
+                    # Split by the separator to get individual JSON chunks
+                    chunks = full_response.split("[/PERPLEXED-SEPARATOR]")
+
+                    for chunk_text in chunks:
+                        if chunk_text.strip():
+                            try:
+                                # Parse the JSON chunk
+                                chunk_data = json.loads(chunk_text.strip())
+
+                                if raw:
+                                    # For raw mode, yield the entire JSON string
+                                    yield {"text": chunk_text.strip()}
+                                else:
+                                    # For non-raw mode, extract the answer if available
+                                    answer_content = extract_answer_content(chunk_data)
+                                    if answer_content:
+                                        yield SearchResponse(answer_content)
+
+                            except json.JSONDecodeError:
+                                # Skip invalid JSON chunks
+                                continue
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+        def for_non_stream():
+            if raw:
+                # For raw mode, yield each chunk as it comes
+                for chunk in for_stream():
+                    yield chunk
+            else:
+                # For non-raw mode, accumulate all chunks and return final response
+                full_response = ""
+                for chunk in for_stream():
+                    full_response += str(chunk)
+
+                if full_response:
+                    self.last_response = SearchResponse(full_response)
+                else:
+                    # Return empty response if no content was extracted
+                    self.last_response = SearchResponse("")
+
+                return self.last_response
+
+        if stream:
+            return for_stream()
+        else:
+            # For non-streaming mode, we need to consume the generator and return the result
+            result = for_non_stream()
+            # If result is a generator (which it shouldn't be), consume it
+            if hasattr(result, '__iter__') and not isinstance(result, (str, bytes)):
+                try:
+                    # This shouldn't happen with our current implementation, but just in case
+                    return list(result)[0] if list(result) else SearchResponse("")
+                except:
+                    return SearchResponse("")
+            return result
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = PERPLEXED()
+
+    # Test with raw=False to see debug output
+    print("=== Testing with raw=False ===")
+    response = ai.search(input(">>> "), stream=False, raw=False)
+    print("Final response:", response)
+    print("Response type:", type(response))
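
The new provider assumes a wire format in which the endpoint streams JSON objects joined by a literal [/PERPLEXED-SEPARATOR] token; stage-only updates carry success and stage, and the final chunk adds a non-empty answer. Note that the file ships under UNFINISHED/ with its [DEBUG] prints still in place. A standalone sketch of the separator parsing, using only the format visible in the diff (the sample body is hypothetical):

    import json

    SEPARATOR = "[/PERPLEXED-SEPARATOR]"

    def final_answer(body: str) -> str:
        # Mirrors PERPLEXED.search(): skip stage-only updates and invalid
        # chunks, return the first chunk with success=True and a non-empty
        # 'answer'.
        for chunk_text in body.split(SEPARATOR):
            chunk_text = chunk_text.strip()
            if not chunk_text:
                continue
            try:
                data = json.loads(chunk_text)
            except json.JSONDecodeError:
                continue
            if data.get("success") and data.get("answer"):
                return data["answer"]
        return ""

    # Hypothetical body illustrating the assumed format:
    body = (
        '{"success": true, "stage": "searching"}'
        '[/PERPLEXED-SEPARATOR]'
        '{"success": true, "stage": "done", "answer": "Python is..."}'
    )
    print(final_answer(body))  # -> Python is...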
webscout/Provider/UNFINISHED/fetch_together_models.py

@@ -49,11 +49,11 @@ def fetch_together_models():
             print(f"Model: {model_id}")
             print(f" Type: {model_type}")
             print(f" Context Length: {context_length}")
-            if model.get("config"):
-                config = model["config"]
-                if config.get("stop"):
-                    print(f" Stop Tokens: {config['stop']}")
-            print("-" * 40)
+            # if model.get("config"):
+            #     config = model["config"]
+            #     if config.get("stop"):
+            #         print(f" Stop Tokens: {config['stop']}")
+            # print("-" * 40)
 
     print(f"\nSUMMARY:")
     print(f"Chat Models: {len(chat_models)}")
@@ -87,9 +87,4 @@ if __name__ == "__main__":
     result = fetch_together_models()
 
     if result:
-        print(f"\n📊 Successfully fetched {len(result['all_models'])} models from Together.xyz")
-
-        # Save to file
-        with open("together_models.json", "w") as f:
-            json.dump(result, f, indent=2)
-        print("✅ Results saved to together_models.json")
+        print(f"\n📊 Successfully fetched {len(result['all_models'])} models from Together.xyz")
webscout/Provider/Venice.py

@@ -20,6 +20,7 @@ class Venice(Provider):
     AVAILABLE_MODELS = [
         "mistral-31-24b",
         "dolphin-3.0-mistral-24b",
+        "dolphin-3.0-mistral-24b-1dot1",
         "qwen2dot5-coder-32b",
         "deepseek-coder-v2-lite",
 
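Venice gains one model ID. Assuming the provider follows the same constructor convention as the other providers in this diff (a model= keyword checked against AVAILABLE_MODELS, which is an assumption here, since the constructor is not shown), selecting it would look like:

    # Hypothetical usage; assumes Venice accepts model= like TypliAI above.
    from webscout import Venice

    ai = Venice(model="dolphin-3.0-mistral-24b-1dot1")  # added in 8.3.5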
webscout/Provider/WiseCat.py

@@ -76,16 +76,6 @@ class WiseCat(Provider):
         )
         self.conversation.history_offset = history_offset
 
-    @staticmethod
-    def _wisecat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from the WiseCat stream format '0:"..."'."""
-        if isinstance(chunk, str):
-            match = re.search(r'0:"(.*?)"', chunk)
-            if match:
-                # Decode potential unicode escapes like \u00e9
-                content = match.group(1).encode().decode('unicode_escape')
-                return content.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes and quotes
-        return None
 
     def ask(
         self,
@@ -138,19 +128,27 @@ class WiseCat(Provider):
                 data=response.iter_content(chunk_size=None),
                 intro_value=None,
                 to_json=False,
-                content_extractor=self._wisecat_extractor,
+                extract_regexes=[
+                    r'0:"(.*?)"'  # Extract content from 0:"..." format
+                ],
+                skip_regexes=[
+                    r'\(\d+\.?\d*s\)',  # Skip timing information like (0.3s), (1s), (0.5s)
+                    r'\(\d+\.?\d*ms\)',  # Skip millisecond timing like (300ms)
+                ],
                 raw=raw
             )
             for content_chunk in processed_stream:
-                # Always yield as string, even in raw mode
-                if isinstance(content_chunk, bytes):
-                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
-                if raw:
-                    yield content_chunk
-                else:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk)
+                if content_chunk and isinstance(content_chunk, str):
+                    # Content is already extracted by sanitize_stream
+                    # Handle unicode escaping and quote unescaping
+                    extracted_content = content_chunk.encode().decode('unicode_escape')
+                    extracted_content = extracted_content.replace('\\\\', '\\').replace('\\"', '"')
+
+                    if raw:
+                        yield extracted_content
+                    else:
+                        streaming_text += extracted_content
+                        yield dict(text=extracted_content)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
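
WiseCat follows the same migration as TypliAI: the per-provider content_extractor staticmethod is deleted and sanitize_stream is driven by extract_regexes/skip_regexes instead, with the unicode-escape and quote handling moved inline after extraction. The pattern in isolation, as a sketch (assumes sanitize_stream accepts the keyword arguments shown in these hunks; the fake byte stream stands in for response.iter_content(chunk_size=None)):

    # Minimal sketch of the extract_regexes/skip_regexes pattern.
    from webscout.AIutel import sanitize_stream

    fake_stream = iter([b'0:"Hello "\n', b'(0.3s)\n', b'0:"world!"\n'])

    for piece in sanitize_stream(
        data=fake_stream,
        intro_value=None,                  # no 'data:'-style prefix to strip
        to_json=False,                     # lines are not JSON objects
        extract_regexes=[r'0:"(.*?)"'],    # keep only the 0:"..." payloads
        skip_regexes=[r'\(\d+\.?\d*s\)'],  # drop timing markers like (0.3s)
        raw=False,
    ):
        print(piece, end="")               # -> Hello world!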
webscout/Provider/__init__.py

@@ -24,7 +24,6 @@ from .yep import *
 from .Cloudflare import *
 from .turboseek import *
 from .TeachAnything import *
-from .AI21 import *
 from .x0gpt import *
 from .cerebras import *
 from .geminiapi import *
@@ -35,7 +34,6 @@ from .llmchatco import LLMChatCo # Add new LLMChat.co provider
 from .talkai import *
 from .llama3mitril import *
 from .Marcus import *
-from .typegpt import *
 from .multichat import *
 from .Jadve import *
 from .chatglm import *
@@ -52,11 +50,9 @@ from .AllenAI import *
 from .HeckAI import *
 from .TwoAI import *
 from .Venice import *
-from .HuggingFaceChat import *
 from .GithubChat import *
 from .copilot import *
 from .sonus import *
-from .uncovr import *
 from .LambdaChat import *
 from .ChatGPTClone import *
 from .VercelAI import *
@@ -70,7 +66,6 @@ from .scira_chat import *
 from .StandardInput import *
 from .toolbaz import Toolbaz
 from .scnet import SCNet
-from .WritingMate import WritingMate
 from .MCPCore import MCPCore
 from .TypliAI import TypliAI
 from .ChatSandbox import ChatSandbox
@@ -87,9 +82,13 @@ from .deepseek_assistant import DeepSeekAssistant
 from .GeminiProxy import GeminiProxy
 from .TogetherAI import TogetherAI
 from .MiniMax import MiniMax
+from .Qodo import *
+from .monochat import MonoChat
 __all__ = [
     'SCNet',
+    'MonoChat',
     'MiniMax',
+    'QodoAI',
     'GeminiProxy',
     'TogetherAI',
     'oivscode',
@@ -109,13 +108,11 @@
     'Venice',
     'ExaAI',
     'Copilot',
-    'HuggingFaceChat',
     'TwoAI',
     'HeckAI',
     'AllenAI',
     'PerplexityLabs',
     'AkashGPT',
-    'WritingMate',
     'WiseCat',
     'IBMGranite',
     'QwenLM',
@@ -145,7 +142,6 @@
     'Cloudflare',
     'TurboSeek',
     'TeachAnything',
-    'AI21',
     'X0GPT',
     'Cerebras',
     'GEMINIAPI',
@@ -160,7 +156,6 @@
     'Talkai',
     'Llama3Mitril',
     'Marcus',
-    'TypeGPT',
     'Netwrck',
     'MultiChatAI',
     'JadveOpenAI',
@@ -168,7 +163,6 @@
     'NousHermes',
     'FreeAIChat',
     'GithubChat',
-    'UncovrAI',
     'VercelAI',
     'ExaChat',
     'AskSteve',
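
The export changes mean AI21, TypeGPT, HuggingFaceChat, UncovrAI, and WritingMate no longer import from the provider package, while QodoAI and MonoChat are new. A quick sketch of the resulting import surface, assuming the root package re-exports Provider names as the docstrings in this diff do:

    # Sketch of the 8.3.5 export surface implied by this diff.
    from webscout import MonoChat, QodoAI   # new in 8.3.5

    try:
        from webscout import AI21           # removed in 8.3.5
    except ImportError:
        print("AI21 is no longer exported by webscout")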