webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/scnet.py
@@ -185,24 +185,24 @@ class SCNet(Provider):
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
-            gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
-                optimizer=optimizer, conversationally=conversationally
-            )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
-
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
-                optimizer=optimizer, conversationally=conversationally
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_data) # get_message expects dict
-
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
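Every chat()-related hunk in this release follows the pattern above: a new raw keyword is accepted and forwarded to ask(), so callers can opt out of get_message() extraction and receive ask()'s chunks untouched. A minimal usage sketch (the module path follows the files-changed list; the no-argument constructor and exact chunk shapes are assumptions, not confirmed by the diff):

    from webscout.Provider.scnet import SCNet  # path per the files-changed list

    ai = SCNet()  # assumed: constructor works without arguments

    # Default behaviour, unchanged: chat() yields text extracted via get_message()
    for text in ai.chat("hello", stream=True):
        print(text, end="", flush=True)

    # New in 8.3.3: raw=True passes through whatever ask() yields
    for chunk in ai.chat("hello", stream=True, raw=True):
        print(chunk, end="", flush=True)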
webscout/Provider/searchchat.py
@@ -232,6 +232,7 @@ class SearchChatAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """
         Chat with the API.
@@ -246,22 +247,21 @@ class SearchChatAI(Provider):
             Either a string response or a generator for streaming
         """
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
-            gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
-                optimizer=optimizer, conversationally=conversationally
-            )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
-
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
-                optimizer=optimizer, conversationally=conversationally
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_data) # get_message expects dict
-
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
webscout/Provider/sonus.py
@@ -208,23 +208,24 @@ class SonusAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         reasoning: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream_chat():
-            # ask() yields dicts when raw=False
-            for response_dict in self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
-                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
             ):
-                yield self.get_message(response_dict)
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when raw=False/True
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
-                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
             )
-            return self.get_message(response_data) # get_message expects dict
-
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
webscout/Provider/toolbaz.py
@@ -24,6 +24,7 @@ class Toolbaz(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
         "gemini-2.0-flash-thinking",
+        "sonar",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
         "o3-mini",
@@ -223,14 +224,22 @@ class Toolbaz(Provider):
                 intro_value=None, # No simple prefix
                 to_json=False, # Content is text
                 content_extractor=self._toolbaz_extractor, # Use the tag remover
-                yield_raw_on_error=True # Yield even if extractor somehow fails (though unlikely for regex)
+                yield_raw_on_error=True, # Yield even if extractor somehow fails (though unlikely for regex)
+                raw=raw
             )

             for content_chunk in processed_stream:
                 # content_chunk is the string with tags removed
-                if content_chunk and isinstance(content_chunk, str):
-                    streaming_text += content_chunk
-                    yield {"text": content_chunk} if not raw else content_chunk
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk}

             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
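The replacement loop above recurs almost verbatim in the TurboSeek, TypefullyAI, and TypeGPT hunks below: decode stray bytes, skip None, then either pass the chunk through (raw mode) or accumulate and wrap it (processed mode). The same logic as a standalone helper, for illustration only (normalize_chunks is a hypothetical name, not part of webscout):

    from typing import Iterator, Optional, Union

    def normalize_chunks(
        chunks: Iterator[Optional[Union[bytes, str]]],
        raw: bool,
    ) -> Iterator[Union[str, dict]]:
        for chunk in chunks:
            if isinstance(chunk, bytes):
                # sanitize_stream may hand back undecoded bytes; decode defensively
                chunk = chunk.decode("utf-8", errors="ignore")
            if chunk is None:
                continue  # the extractor produced nothing for this line
            if raw:
                yield chunk  # pass-through mode: caller gets the chunk as-is
            elif chunk and isinstance(chunk, str):
                yield {"text": chunk}  # processed mode wraps text in a dict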
@@ -274,28 +283,36 @@ class Toolbaz(Provider):
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Toolbaz API."""
         def for_stream_chat():
             # ask() yields dicts when raw=False
-            for response_dict in self.ask(
+            for response in self.ask(
                 prompt,
                 stream=True,
-                raw=False, # Ensure ask yields dicts
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally
             ):
-                yield self.get_message(response_dict)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
             # ask() returns a dict when stream=False
             response_dict = self.ask(
                 prompt,
                 stream=False,
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally,
             )
-            return self.get_message(response_dict)
+            if raw:
+                return response_dict
+            else:
+                return self.get_message(response_dict)

         return for_stream_chat() if stream else for_non_stream_chat()
webscout/Provider/turboseek.py
@@ -136,65 +136,59 @@ class TurboSeek(Provider):
         }

         def for_stream():
-            try: # Add try block for CurlError
-                # Use curl_cffi session post with impersonate
+            try:
                 response = self.session.post(
                     self.chat_endpoint,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome120", # Try a different impersonation profile
+                    impersonate="chrome120"
                 )
                 if not response.ok:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True, # Stream sends JSON
-                    content_extractor=self._turboseek_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    to_json=True,
+                    content_extractor=self._turboseek_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _turboseek_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        self.last_response.update(dict(text=streaming_text)) # Update last_response incrementally
-                        yield dict(text=content_chunk) if not raw else content_chunk # Yield dict or raw string
-
-                # Update conversation history after stream finishes
-                if streaming_text: # Only update if content was received
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            self.last_response.update(dict(text=streaming_text))
+                            yield dict(text=content_chunk)
+                if streaming_text:
                     self.conversation.update_chat_history(
-                        prompt, streaming_text # Use the fully aggregated text
+                        prompt, streaming_text
                     )
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
                     if isinstance(chunk_data, dict) and "text" in chunk_data:
                         full_text += chunk_data["text"]
-                    elif isinstance(chunk_data, str): # Handle case where raw=True was passed
+                    elif isinstance(chunk_data, str):
                         full_text += chunk_data
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
-            # last_response and history are updated within for_stream
-            # Ensure last_response reflects the complete aggregated text
-            self.last_response = {"text": full_text}
+            self.last_response = {"text": full_text}
             return self.last_response
-
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -203,6 +197,7 @@ class TurboSeek(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         """Generate response `str`
         Args:
@@ -216,20 +211,24 @@ class TurboSeek(Provider):

         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
-
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
@@ -251,7 +250,7 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = TurboSeek(timeout=60)
         print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("yooooooooooo", stream=True)
+        response_stream = ai.chat("yooooooooooo", stream=True, raw=False)
         for chunk in response_stream:
             print(chunk, end="", flush=True)
         # Optional: Test non-stream
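Extending the __main__ test above to the non-streaming path shows both output modes side by side (a sketch; the constructor arguments mirror the existing test, and the raw=True call is an inference from the new chat() signature):

    ai = TurboSeek(timeout=60)

    # Processed mode: a plain string extracted via get_message()
    print(ai.chat("yooooooooooo", stream=False))

    # Raw mode (new in 8.3.3): chat() returns ask()'s value without extraction
    print(ai.chat("yooooooooooo", stream=False, raw=True))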
webscout/Provider/typefully.py
@@ -126,11 +126,19 @@ class TypefullyAI(Provider):
                 intro_value=None,
                 to_json=False,
                 content_extractor=self._typefully_extractor,
+                raw=raw
             )
             for content_chunk in processed_stream:
-                if content_chunk and isinstance(content_chunk, str):
-                    streaming_text += content_chunk
-                    yield content_chunk if raw else dict(text=content_chunk)
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -151,21 +159,28 @@ class TypefullyAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
webscout/Provider/typegpt.py
@@ -17,6 +17,7 @@ class TypeGPT(Provider):
     AVAILABLE_MODELS = [
         # Working Models (based on testing)
         # "gpt-4o-mini-2024-07-18",
+        "gpt-4o-mini",
         "chatgpt-4o-latest",
         "deepseek-r1",
         "deepseek-v3",
@@ -106,7 +107,6 @@ class TypeGPT(Provider):
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
-
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},
@@ -120,10 +120,8 @@ class TypeGPT(Provider):
             "top_p": self.top_p,
             "max_tokens": self.max_tokens_to_sample,
         }
-
         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,
@@ -136,36 +134,33 @@ class TypeGPT(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors first
-
             streaming_text = ""
-            # Use sanitize_stream
             processed_stream = sanitize_stream(
                 data=response.iter_content(chunk_size=None), # Pass byte iterator
                 intro_value="data:",
                 to_json=True, # Stream sends JSON
                 skip_markers=["[DONE]"],
                 content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
-                yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                yield_raw_on_error=False,
+                raw=raw
             )
-
             for content_chunk in processed_stream:
-                # content_chunk is the string extracted by the content_extractor
-                if content_chunk and isinstance(content_chunk, str):
-                    streaming_text += content_chunk
-                    yield dict(text=content_chunk) if not raw else content_chunk
-                # Update last_response incrementally
-                self.last_response = dict(text=streaming_text)
-
-            # Update conversation history after stream finishes
-            if streaming_text: # Only update if something was received
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+                    self.last_response = dict(text=streaming_text)
+            if streaming_text:
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,
@@ -177,34 +172,32 @@ class TypeGPT(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors
-
             try:
                 response_text = response.text # Get raw text
-
-                # Use sanitize_stream for non-streaming JSON response
                 processed_stream = sanitize_stream(
                     data=response_text,
                     to_json=True, # Parse the whole text as JSON
                     intro_value=None,
-                    # Extractor for non-stream structure
                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
-                # Extract the single result
                 content = ""
                 for extracted_content in processed_stream:
-                    content = extracted_content if isinstance(extracted_content, str) else ""
-
-                self.last_response = {"text": content} # Store in expected format
+                    if isinstance(extracted_content, bytes):
+                        extracted_content = extracted_content.decode('utf-8', errors='ignore')
+                    if extracted_content is None:
+                        continue
+                    if raw:
+                        content += extracted_content
+                    else:
+                        content = extracted_content if isinstance(extracted_content, str) else ""
+                self.last_response = {"text": content}
                 self.conversation.update_chat_history(prompt, content)
-                return self.last_response
-            except (json.JSONDecodeError, Exception) as je: # Catch potential JSON errors or others
+                return self.last_response if not raw else content
+            except (json.JSONDecodeError, Exception) as je:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
-
-
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -213,25 +206,27 @@ class TypeGPT(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
-        """Generate response string or stream."""
         if stream:
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw, # Ensure ask yields dicts or raw
                 optimizer=optimizer, conversationally=conversationally
             )
-            for chunk_dict in gen:
-                # get_message expects a dict
-                yield self.get_message(chunk_dict)
+            for chunk in gen:
+                if raw:
+                    yield chunk
+                else:
+                    yield self.get_message(chunk)
         else:
-            # ask() returns a dict when not streaming
-            response_dict = self.ask(
-                prompt, stream=False,
+            response = self.ask(
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_dict)
-
+            if raw:
+                return response
+            else:
+                return self.get_message(response)
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response."""
         if isinstance(response, dict):
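Taken together, the TypeGPT hunks make raw mode symmetric across paths: the streaming loop yields unprocessed chunks, and the non-streaming branch now ends in return self.last_response if not raw else content, so ask() hands back the bare content string. A closing sketch (module path per the files-changed list; the no-argument constructor is an assumption):

    from webscout.Provider.typegpt import TypeGPT  # path per the files-changed list

    ai = TypeGPT()  # assumed: constructor works without arguments

    # raw=False (default): ask() returns a dict like {"text": "..."}
    print(ai.ask("Name three rivers.", stream=False))

    # raw=True (new in 8.3.3): ask() returns the bare content string
    print(ai.ask("Name three rivers.", stream=False, raw=True))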