webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/turboseek.py

@@ -136,65 +136,59 @@ class TurboSeek(Provider):
         }

         def for_stream():
-            try: # Add try block for CurlError
-                # Use curl_cffi session post with impersonate
+            try:
                 response = self.session.post(
                     self.chat_endpoint,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome120", # Try a different impersonation profile
+                    impersonate="chrome120"
                 )
                 if not response.ok:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True, # Stream sends JSON
-                    content_extractor=self._turboseek_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    to_json=True,
+                    content_extractor=self._turboseek_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _turboseek_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        self.last_response.update(dict(text=streaming_text)) # Update last_response incrementally
-                        yield dict(text=content_chunk) if not raw else content_chunk # Yield dict or raw string
-
-                # Update conversation history after stream finishes
-                if streaming_text: # Only update if content was received
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            self.last_response.update(dict(text=streaming_text))
+                            yield dict(text=content_chunk)
+                if streaming_text:
                     self.conversation.update_chat_history(
-                        prompt, streaming_text # Use the fully aggregated text
+                        prompt, streaming_text
                     )
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
                     if isinstance(chunk_data, dict) and "text" in chunk_data:
                         full_text += chunk_data["text"]
-                    elif isinstance(chunk_data, str): # Handle case where raw=True was passed
+                    elif isinstance(chunk_data, str):
                         full_text += chunk_data
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
-            # last_response and history are updated within for_stream
-            # Ensure last_response reflects the complete aggregated text
-            self.last_response = {"text": full_text}
+            self.last_response = {"text": full_text}
             return self.last_response
-
         return for_stream() if stream else for_non_stream()

     def chat(

@@ -203,6 +197,7 @@ class TurboSeek(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         """Generate response `str`
         Args:

@@ -216,20 +211,24 @@ class TurboSeek(Provider):

         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
-
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:

@@ -251,7 +250,7 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = TurboSeek(timeout=60)
         print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("yooooooooooo", stream=True)
+        response_stream = ai.chat("yooooooooooo", stream=True, raw=False)
         for chunk in response_stream:
             print(chunk, end="", flush=True)
         # Optional: Test non-stream
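
The change repeated across all four providers in this diff is the same: ask() and chat() gain a raw passthrough, sanitize_stream() is called with raw=raw, and each chunk is normalized (bytes decoded, None skipped) before being yielded either untouched or wrapped in dict(text=...). A minimal usage sketch of the two modes, assuming webscout 8.3.4 and the chat() signature shown in the hunk above (not taken verbatim from the package docs):

    # Sketch only: assumes the 8.3.4 TurboSeek.chat signature shown in this diff.
    from webscout.Provider.turboseek import TurboSeek

    ai = TurboSeek(timeout=60)

    # Default mode: chat() yields extracted message strings (get_message per chunk).
    for chunk in ai.chat("Hello", stream=True, raw=False):
        print(chunk, end="", flush=True)

    # Raw mode: chat() forwards whatever sanitize_stream produced, with no dict wrapping.
    for chunk in ai.chat("Hello", stream=True, raw=True):
        print(chunk, end="", flush=True)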
webscout/Provider/typefully.py

@@ -126,11 +126,19 @@ class TypefullyAI(Provider):
                 intro_value=None,
                 to_json=False,
                 content_extractor=self._typefully_extractor,
+                raw=raw
             )
             for content_chunk in processed_stream:
-                if content_chunk and isinstance(content_chunk, str):
-                    streaming_text += content_chunk
-                    yield content_chunk if raw else dict(text=content_chunk)
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)

@@ -151,21 +159,28 @@ class TypefullyAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
webscout/Provider/typegpt.py

@@ -17,10 +17,11 @@ class TypeGPT(Provider):
     AVAILABLE_MODELS = [
         # Working Models (based on testing)
         # "gpt-4o-mini-2024-07-18",
+        "gpt-4o-mini",
         "chatgpt-4o-latest",
-        "deepseek-r1",
+        # "deepseek-r1",
         "deepseek-v3",
-        "uncensored-r1",
+        # "uncensored-r1",
         # "Image-Generator",
     ]

@@ -106,7 +107,6 @@ class TypeGPT(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},

@@ -120,10 +120,8 @@ class TypeGPT(Provider):
             "top_p": self.top_p,
             "max_tokens": self.max_tokens_to_sample,
         }
-
         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,

@@ -136,36 +134,33 @@ class TypeGPT(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors first
-
             streaming_text = ""
-            # Use sanitize_stream
             processed_stream = sanitize_stream(
                 data=response.iter_content(chunk_size=None), # Pass byte iterator
                 intro_value="data:",
                 to_json=True, # Stream sends JSON
                 skip_markers=["[DONE]"],
                 content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
-                yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                yield_raw_on_error=False,
+                raw=raw
             )
-
             for content_chunk in processed_stream:
-                # content_chunk is the string extracted by the content_extractor
-                if content_chunk and isinstance(content_chunk, str):
-                    streaming_text += content_chunk
-                    yield dict(text=content_chunk) if not raw else content_chunk
-                    # Update last_response incrementally
-                    self.last_response = dict(text=streaming_text)
-
-            # Update conversation history after stream finishes
-            if streaming_text: # Only update if something was received
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+                        self.last_response = dict(text=streaming_text)
+            if streaming_text:
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,

@@ -177,34 +172,32 @@ class TypeGPT(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors
-
             try:
                 response_text = response.text # Get raw text
-
-                # Use sanitize_stream for non-streaming JSON response
                 processed_stream = sanitize_stream(
                     data=response_text,
                     to_json=True, # Parse the whole text as JSON
                     intro_value=None,
-                    # Extractor for non-stream structure
                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
-                # Extract the single result
                 content = ""
                 for extracted_content in processed_stream:
-                    content = extracted_content if isinstance(extracted_content, str) else ""
-
-                self.last_response = {"text": content} # Store in expected format
+                    if isinstance(extracted_content, bytes):
+                        extracted_content = extracted_content.decode('utf-8', errors='ignore')
+                    if extracted_content is None:
+                        continue
+                    if raw:
+                        content += extracted_content
+                    else:
+                        content = extracted_content if isinstance(extracted_content, str) else ""
+                self.last_response = {"text": content}
                 self.conversation.update_chat_history(prompt, content)
-                return self.last_response
-            except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
+                return self.last_response if not raw else content
+            except (json.JSONDecodeError, Exception) as je:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
-
-
         return for_stream() if stream else for_non_stream()

     def chat(

@@ -213,25 +206,27 @@ class TypeGPT(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
-        """Generate response string or stream."""
         if stream:
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw, # Ensure ask yields dicts or raw
                 optimizer=optimizer, conversationally=conversationally
             )
-            for chunk_dict in gen:
-                # get_message expects a dict
-                yield self.get_message(chunk_dict)
+            for chunk in gen:
+                if raw:
+                    yield chunk
+                else:
+                    yield self.get_message(chunk)
         else:
-            # ask() returns a dict when not streaming
-            response_dict = self.ask(
-                prompt, stream=False,
+            response = self.ask(
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_dict)
-
+            if raw:
+                return response
+            else:
+                return self.get_message(response)
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response."""
         if isinstance(response, dict):
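
Two behavioral notes fall out of the TypeGPT hunks: "gpt-4o-mini" is now listed as working while "deepseek-r1" and "uncensored-r1" are commented out, and with raw=True the non-stream path returns the extracted string rather than the usual {"text": ...} dict. A hedged sketch of what callers see; the model keyword argument is inferred from AVAILABLE_MODELS and is not confirmed by this diff:

    # Sketch only: assumes TypeGPT accepts a model kwarg and the 8.3.4 behavior above.
    from webscout.Provider.typegpt import TypeGPT

    ai = TypeGPT(model="gpt-4o-mini")
    msg = ai.chat("Hi", stream=False)               # raw=False (default): extracted message str
    payload = ai.ask("Hi", stream=False, raw=True)  # raw=True: plain content string, not {"text": ...}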
webscout/Provider/uncovr.py

@@ -165,8 +165,6 @@ class UncovrAI(Provider):
                 )
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Prepare the request payload
         payload = {
             "content": conversation_prompt,
             "chatId": self.chat_id,

@@ -180,78 +178,72 @@ class UncovrAI(Provider):
                 "creativity": creativity
             }
         }
-
        def for_stream():
            try:
-                # Use curl_cffi session post with impersonate
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
-                    impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
                )
-
                if response.status_code != 200:
-                    # If we get a non-200 response, try refreshing our identity once
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
                        retry_response = self.session.post(
                            self.url,
                            json=payload,
                            stream=True,
                            timeout=self.timeout,
-                            impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
+                            impersonate=self.fingerprint.get("browser_type", "chrome110")
                        )
                        if not retry_response.ok:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                            )
-                        response = retry_response # Use the successful retry response
+                        response = retry_response
                    else:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code} - {response.text}"
                        )
-
                streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No simple prefix
-                    to_json=False, # Content is not JSON
-                    content_extractor=self._uncovr_extractor, # Use the specific extractor
-                    yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._uncovr_extractor,
+                    yield_raw_on_error=True,
+                    raw=raw
                )
-
                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk) if not raw else content_chunk
-
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue # Ignore non-content lines
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
        def for_non_stream():
            try:
-                # Use curl_cffi session post with impersonate
                response = self.session.post(
                    self.url,
                    json=payload,
                    timeout=self.timeout,
                    impersonate=self.fingerprint.get("browser_type", "chrome110")
                )
-
                if response.status_code != 200:
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
                        response = self.session.post(
                            self.url,
                            json=payload,

@@ -266,36 +258,32 @@ class UncovrAI(Provider):
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {response.text}"
                    )
-
-                response_text = response.text # Get the full response text
-
-                # Use sanitize_stream to process the non-streaming text
-                # It won't parse as JSON, but will apply the extractor line by line
+                response_text = response.text
                processed_stream = sanitize_stream(
-                    data=response_text.splitlines(), # Split into lines first
+                    data=response_text.splitlines(),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._uncovr_extractor,
-                    yield_raw_on_error=True
+                    yield_raw_on_error=True,
+                    raw=raw
                )
-
-                # Aggregate the results from the generator
                full_response = ""
                for content in processed_stream:
-                    if content and isinstance(content, str):
+                    if isinstance(content, bytes):
+                        content = content.decode('utf-8', errors='ignore')
+                    if content is None:
+                        continue # Ignore non-content lines
+                    if raw:
+                        full_response += content
+                    elif content and isinstance(content, str):
                        full_response += content
-
-                # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
-                return {"text": full_response}
-
-            except CurlError as e: # Catch CurlError
+                return {"text": full_response} if not raw else full_response
+            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
-
-
        return for_stream() if stream else for_non_stream()

    def chat(

@@ -307,23 +295,29 @@ class UncovrAI(Provider):
        temperature: int = 32,
        creativity: str = "medium",
        selected_focus: list = ["web"],
-        selected_tools: list = []
+        selected_tools: list = [],
+        raw: bool = False, # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream():
            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally,
                temperature=temperature, creativity=creativity,
                selected_focus=selected_focus, selected_tools=selected_tools
            ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally,
-                    temperature=temperature, creativity=creativity,
-                    selected_focus=selected_focus, selected_tools=selected_tools
-                )
+            result = self.ask(
+                prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally,
+                temperature=temperature, creativity=creativity,
+                selected_focus=selected_focus, selected_tools=selected_tools
            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:

@@ -333,36 +327,7 @@ class UncovrAI(Provider):
        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in UncovrAI.AVAILABLE_MODELS:
-        try:
-            test_ai = UncovrAI(model=model, timeout=60)
-            # Test non-stream first as stream logic depends on it
-            response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
-
-            if response_non_stream and len(response_non_stream.strip()) > 0:
-                # Now test stream
-                response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
-                response_text = ""
-                for chunk in response_stream:
-                    response_text += chunk
-
-                if response_text and len(response_text.strip()) > 0:
-                    status = "✓"
-                    # Clean and truncate response
-                    clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                    display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-                else:
-                    status = "✗ (Stream)"
-                    display_text = "Empty or invalid stream response"
-            else:
-                status = "✗ (Non-Stream)"
-                display_text = "Empty or invalid non-stream response"
-
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    ai = UncovrAI()
+    response = ai.chat("who is pm of india?", raw=False, stream=True)
+    for chunk in response:
+        print(chunk, end='', flush=True)
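
The chunk-handling loop added in every provider above follows one pattern: decode bytes defensively, skip None results from the content extractor, then either pass the chunk through (raw=True) or aggregate it and wrap it in a dict (raw=False). Below is a standalone distillation of that pattern; normalize_chunks is an illustrative helper written for this note, not part of the webscout API:

    # Illustrative distillation of the 8.3.4 chunk-handling pattern.
    # normalize_chunks is a hypothetical helper, not a webscout function.
    from typing import Any, Dict, Iterator, Union

    def normalize_chunks(chunks: Iterator[Any], raw: bool) -> Iterator[Union[str, Dict[str, str]]]:
        """Decode bytes, drop None extractor results, yield raw strings or {'text': ...} dicts."""
        streaming_text = ""
        for chunk in chunks:
            if isinstance(chunk, bytes):
                chunk = chunk.decode("utf-8", errors="ignore")  # tolerate partial UTF-8
            if chunk is None:
                continue  # extractor produced nothing for this line
            if raw:
                yield chunk  # passthrough, no aggregation or wrapping
            elif chunk and isinstance(chunk, str):
                streaming_text += chunk  # running aggregate, as the providers keep for history
                yield dict(text=chunk)

    # Dict mode wraps each piece; raw mode passes it through unchanged.
    pieces = [b"Hel", "lo", None, " world"]
    assert [c["text"] for c in normalize_chunks(iter(pieces), raw=False)] == ["Hel", "lo", " world"]
    assert list(normalize_chunks(iter(pieces), raw=True)) == ["Hel", "lo", " world"]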