webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
```diff
@@ -3,7 +3,7 @@ import re
 from typing import Optional, Union, Any, Dict, Generator
 from uuid import uuid4
 
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 
@@ -79,7 +79,7 @@ class KOALA(Provider):
 raw: bool = False,
 optimizer: str = None,
 conversationally: bool = False,
- ) -> Dict[str, Any]:
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
 conversation_prompt = self.conversation.gen_complete_prompt(prompt)
 if optimizer:
 if hasattr(Optimizers, optimizer):
@@ -94,68 +94,80 @@ class KOALA(Provider):
 "outputHistory": [],
 "model": self.model
 }
- def for_stream():
- response = self.session.post(
- self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+ response = self.session.post(
+ self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+ )
+ if not response.ok:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to generate response - ({response.status_code}, {response.reason})"
 )
- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response - ({response.status_code}, {response.reason})"
- )
+ # Use sanitize_stream with content_extractor and intro_value like YEPCHAT/X0GPT
+ processed_stream = sanitize_stream(
+ data=response.iter_lines(decode_unicode=True),
+ intro_value="data:",
+ to_json=False,
+ content_extractor=self._koala_extractor,
+ raw=raw
+ )
+ if stream:
 streaming_response = ""
- for line in response.iter_lines(decode_unicode=True):
- if not line:
- continue
- # Only process lines starting with data:
- if line.startswith("data:"):
- content = self._koala_extractor(line)
- if content and content.strip():
- streaming_response += content
- yield dict(text=content) if not raw else content
- # Only update chat history if response is not empty
+ for content_chunk in processed_stream:
+ if raw:
+ if content_chunk and isinstance(content_chunk, str) and content_chunk.strip():
+ streaming_response += content_chunk
+ yield content_chunk
+ else:
+ if content_chunk and isinstance(content_chunk, str) and content_chunk.strip():
+ streaming_response += content_chunk
+ yield dict(text=content_chunk)
 if streaming_response.strip():
 self.last_response = dict(text=streaming_response)
 self.conversation.update_chat_history(
 prompt, self.get_message(self.last_response)
 )
- def for_non_stream():
- # Use streaming logic to collect the full response
+ else:
 full_text = ""
- for chunk in for_stream():
- if isinstance(chunk, dict):
- full_text += chunk.get("text", "")
- elif isinstance(chunk, str):
- full_text += chunk
- # Only update chat history if response is not empty
+ for content_chunk in processed_stream:
+ if raw:
+ if content_chunk and isinstance(content_chunk, str):
+ full_text += content_chunk
+ else:
+ if content_chunk and isinstance(content_chunk, str):
+ full_text += content_chunk
 if full_text.strip():
 self.last_response = dict(text=full_text)
 self.conversation.update_chat_history(
 prompt, self.get_message(self.last_response)
 )
 return self.last_response
- return for_stream() if stream else for_non_stream()
 
 def chat(
 self,
 prompt: str,
 stream: bool = False,
+ raw: bool = False,
 optimizer: str = None,
 conversationally: bool = False,
 ) -> Union[str, Generator[str, None, None]]:
 def for_stream():
 for response in self.ask(
- prompt, True, optimizer=optimizer, conversationally=conversationally
+ prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
 ):
- yield self.get_message(response)
+ if raw:
+ yield response
+ else:
+ yield self.get_message(response)
 def for_non_stream():
- return self.get_message(
- self.ask(
- prompt,
- False,
- optimizer=optimizer,
- conversationally=conversationally,
- )
+ result = self.ask(
+ prompt,
+ False,
+ raw=raw,
+ optimizer=optimizer,
+ conversationally=conversationally,
 )
+ if raw:
+ return result.get("text", "") if isinstance(result, dict) else str(result)
+ return self.get_message(result)
 return for_stream() if stream else for_non_stream()
 
 def get_message(self, response: dict) -> str:
@@ -165,6 +177,6 @@ class KOALA(Provider):
 if __name__ == "__main__":
 from rich import print
 ai = KOALA(timeout=60)
- response = ai.chat("Say 'Hello' in one word", stream=True)
+ response = ai.chat("tell me about humans", stream=True, raw=False)
 for chunk in response:
- print(chunk, end="", flush=True)
+ print(chunk)
```
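The KOALA hunks above replace the inner for_stream/for_non_stream closures with a single sanitize_stream pass and add a raw flag to chat(). A minimal sketch of how that flag changes what chat() yields, assuming KOALA is exported from webscout.Provider and the backend is reachable (illustrative only, not part of the release):

```python
# Minimal sketch based on the KOALA hunks above.
# Assumption: `from webscout.Provider import KOALA` resolves in this install.
from webscout.Provider import KOALA

ai = KOALA(timeout=60)

# raw=False (default): each chunk is the text extracted via get_message()
for chunk in ai.chat("tell me about humans", stream=True, raw=False):
    print(chunk, end="", flush=True)

# raw=True: chat() forwards raw=True to ask(), which yields the chunks
# produced by sanitize_stream without wrapping them in {"text": ...} dicts
for chunk in ai.chat("tell me about humans", stream=True, raw=True):
    print(chunk, end="", flush=True)
```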
```diff
@@ -153,7 +153,7 @@ class LearnFast(Provider):
 optimizer: str = None,
 conversationally: bool = False,
 image_path: Optional[str] = None,
- ) -> Union[dict, Generator[dict, None, None]]:
+ ) -> Union[dict, Generator[dict, None, None], str]:
 """Chat with LearnFast
 
 Args:
@@ -166,7 +166,7 @@ class LearnFast(Provider):
 Defaults to None.
 
 Returns:
- Union[dict, Generator[dict, None, None]]: Response generated
+ Union[dict, Generator[dict, None, None], str]: Response generated
 """
 conversation_prompt = self.conversation.gen_complete_prompt(prompt)
 if optimizer:
@@ -202,68 +202,59 @@ class LearnFast(Provider):
 data = json.dumps(payload)
 
 def for_stream():
- full_response = "" # Initialize outside try block
+ full_response = ""
 try:
- # Use curl_cffi session post with impersonate
 response = self.session.post(
 self.api_endpoint,
 headers=current_headers, # Use headers with uniqueid
 data=data,
 stream=True,
 timeout=self.timeout,
- # proxies are set on the session
- impersonate="chrome110" # Use a common impersonation profile
- )
- response.raise_for_status() # Check for HTTP errors
-
- # Use sanitize_stream
- processed_stream = sanitize_stream(
- data=response.iter_content(chunk_size=None), # Pass byte iterator
- intro_value=None, # No prefix
- to_json=True, # Stream sends JSON lines
- skip_markers=["[DONE]"],
- content_extractor=self._learnfast_extractor, # Use the specific extractor
- yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+ impersonate="chrome110"
 )
+ response.raise_for_status()
 
- for content_chunk in processed_stream:
- # content_chunk is the string extracted by _learnfast_extractor
- if content_chunk and isinstance(content_chunk, str):
- full_response += content_chunk
- resp = {"text": content_chunk}
- yield resp if not raw else content_chunk
-
- # Update history after stream finishes
+ # Iterate over each line in the response
+ for line in response.iter_lines():
+ if not line:
+ continue
+ try:
+ chunk = json.loads(line)
+ except Exception:
+ continue
+ # Only yield message_type == "text"
+ data_field = chunk.get("data", {})
+ if (
+ chunk.get("code") == 200 and
+ data_field.get("code") == 200 and
+ data_field.get("message_type") == "text" and
+ data_field.get("message")
+ ):
+ message = data_field["message"]
+ full_response += message
+ if raw:
+ yield message
+ else:
+ yield {"text": message}
 self.last_response = {"text": full_response}
 self.conversation.update_chat_history(prompt, full_response)
-
- except CurlError as e: # Catch CurlError
+ except CurlError as e:
 raise exceptions.FailedToGenerateResponseError(f"An error occurred (CurlError): {e}") from e
- except Exception as e: # Catch other potential exceptions (like HTTPError)
+ except Exception as e:
 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
 raise exceptions.FailedToGenerateResponseError(f"An error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
 def for_non_stream():
- # Aggregate the stream using the updated for_stream logic
 full_response_text = ""
 try:
- # Ensure raw=False so for_stream yields dicts
 for chunk_data in for_stream():
- if isinstance(chunk_data, dict) and "text" in chunk_data:
+ if raw and isinstance(chunk_data, str):
+ full_response_text += chunk_data
+ elif isinstance(chunk_data, dict) and "text" in chunk_data:
 full_response_text += chunk_data["text"]
- # Handle raw string case if raw=True was passed
- elif raw and isinstance(chunk_data, str):
- full_response_text += chunk_data
 except Exception as e:
- # If aggregation fails but some text was received, use it. Otherwise, re-raise.
- if not full_response_text:
- raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
- # last_response and history are updated within for_stream
- # Return the final aggregated response dict or raw string
+ if not full_response_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 return full_response_text if raw else self.last_response
-
-
 return for_stream() if stream else for_non_stream()
 
 def chat(
@@ -273,36 +264,30 @@ class LearnFast(Provider):
 optimizer: str = None,
 conversationally: bool = False,
 image_path: Optional[str] = None,
+ raw: bool = False
 ) -> Union[str, Generator[str, None, None]]:
- """Generate response `str`
- Args:
- prompt (str): Prompt to be send.
- stream (bool, optional): Flag for streaming response. Defaults to False.
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
- image_path (Optional[str], optional): Path to the image to be uploaded.
- Defaults to None.
- Returns:
- Union[str, Generator[str, None, None]]: Response generated
- """
+ """Generate response `str` or stream, with raw support"""
 try:
- # ask() yields dicts or strings when streaming
 response_gen = self.ask(
- prompt, stream=stream, raw=False, # Ensure ask yields dicts/dict
+ prompt, stream=stream, raw=raw,
 optimizer=optimizer, conversationally=conversationally,
 image_path=image_path
 )
 if stream:
 def stream_wrapper():
- for chunk_dict in response_gen:
- yield self.get_message(chunk_dict) # get_message expects dict
+ for chunk in response_gen:
+ if raw:
+ yield chunk
+ else:
+ yield self.get_message(chunk)
 return stream_wrapper()
 else:
- # response_gen is the final dict in non-stream mode
- return self.get_message(response_gen) # get_message expects dict
+ if raw:
+ return response_gen if isinstance(response_gen, str) else self.get_message(response_gen)
+ else:
+ return self.get_message(response_gen)
 except Exception as e:
- # Return error message directly, consider raising instead for better error handling upstream
- return f"Error: {str(e)}"
+ return f"Error: {str(e)}"
 
 def get_message(self, response: dict) -> str:
 """Retrieves message only from response
@@ -320,6 +305,6 @@ if __name__ == "__main__":
 # Ensure curl_cffi is installed
 from rich import print
 ai = LearnFast()
- response = ai.chat(input(">>> "), stream=True)
+ response = ai.chat(input(">>> "), stream=True, raw=False)
 for chunk in response:
- print(chunk, end="", flush=True)
+ print(chunk, end='', flush=True)
```
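The LearnFast hunks above drop the sanitize_stream call in favour of parsing each response line as JSON and keeping only chunks whose outer code and data.code are 200 and whose message_type is "text". A self-contained sketch of that filter follows; the field names come from the hunk, while the sample lines and the helper name are hypothetical:

```python
import json
from typing import Iterable, Iterator

def extract_text_messages(lines: Iterable[bytes]) -> Iterator[str]:
    """Mirror of the filter in the LearnFast hunk: keep only well-formed
    JSON lines with code == 200, data.code == 200 and message_type == "text"."""
    for line in lines:
        if not line:
            continue
        try:
            chunk = json.loads(line)
        except Exception:
            continue  # skip keep-alive or non-JSON lines
        data_field = chunk.get("data", {})
        if (
            chunk.get("code") == 200
            and data_field.get("code") == 200
            and data_field.get("message_type") == "text"
            and data_field.get("message")
        ):
            yield data_field["message"]

# Hypothetical lines shaped like the fields referenced in the hunk:
sample = [
    b'{"code": 200, "data": {"code": 200, "message_type": "text", "message": "Hello"}}',
    b'{"code": 200, "data": {"code": 200, "message_type": "status", "message": "thinking"}}',
]
print("".join(extract_text_messages(sample)))  # -> "Hello"
```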
```diff
@@ -3,7 +3,7 @@ from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Optional, Generator, List
 
- from webscout.AIutel import Optimizers
+ from webscout.AIutel import Optimizers, sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -95,8 +95,8 @@ class LLMChat(Provider):
 raw: bool = False,
 optimizer: str = None,
 conversationally: bool = False,
- ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
- """Chat with LLMChat with logging capabilities"""
+ ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
+ """Chat with LLMChat with logging capabilities and raw output support using sanitize_stream."""
 
 conversation_prompt = self.conversation.gen_complete_prompt(prompt)
 if optimizer:
@@ -116,79 +116,59 @@ class LLMChat(Provider):
 {"role": "user", "content": conversation_prompt}
 ],
 "max_tokens": self.max_tokens_to_sample,
- "stream": True # API seems to always stream based on endpoint name
+ "stream": True
 }
 
 def for_stream():
- full_response = "" # Initialize outside try block
+ full_response = ""
 try:
- # Use curl_cffi session post with impersonate
 response = self.session.post(
 url,
 json=payload,
 stream=True,
 timeout=self.timeout,
- impersonate="chrome110" # Use a common impersonation profile
+ impersonate="chrome110"
 )
- response.raise_for_status() # Check for HTTP errors
-
- # Iterate over bytes and decode manually
- for line_bytes in response.iter_lines():
- if line_bytes:
- try:
- line = line_bytes.decode('utf-8')
- if line.startswith('data: '):
- data_str = line[6:]
- if data_str == '[DONE]':
- break
- try:
- data = json.loads(data_str)
- if data.get('response'):
- response_text = data['response']
- full_response += response_text
- resp = dict(text=response_text)
- # Yield dict or raw string chunk
- yield resp if not raw else response_text
- except json.JSONDecodeError:
- continue # Ignore invalid JSON data
- except UnicodeDecodeError:
- continue # Ignore decoding errors
-
- # Update history after stream finishes
+ response.raise_for_status()
+
+ # Use sanitize_stream to process SSE lines
+ processed_stream = sanitize_stream(
+ data=response.iter_lines(),
+ intro_value="data: ",
+ to_json=True,
+ skip_markers=["[DONE]"],
+ content_extractor=lambda chunk: chunk.get('response') if isinstance(chunk, dict) else None,
+ yield_raw_on_error=False,
+ raw=raw
+ )
+ for content_chunk in processed_stream:
+ if content_chunk and isinstance(content_chunk, str):
+ full_response += content_chunk
+ if raw:
+ yield content_chunk
+ else:
+ yield dict(text=content_chunk)
 self.last_response = dict(text=full_response)
 self.conversation.update_chat_history(
 prompt, full_response
 )
-
- except CurlError as e: # Catch CurlError
+ except CurlError as e:
 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
- except Exception as e: # Catch other potential exceptions (like HTTPError)
+ except Exception as e:
 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
-
 def for_non_stream():
- # Aggregate the stream using the updated for_stream logic
- full_response_text = ""
+ full_response = ""
 try:
- # Ensure raw=False so for_stream yields dicts
- for chunk_data in for_stream():
- if isinstance(chunk_data, dict) and "text" in chunk_data:
- full_response_text += chunk_data["text"]
- # Handle raw string case if raw=True was passed
- elif raw and isinstance(chunk_data, str):
- full_response_text += chunk_data
+ for content_chunk in for_stream():
+ if raw and isinstance(content_chunk, str):
+ full_response += content_chunk
+ elif isinstance(content_chunk, dict) and "text" in content_chunk:
+ full_response += content_chunk["text"]
 except Exception as e:
- # If aggregation fails but some text was received, use it. Otherwise, re-raise.
- if not full_response_text:
- raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
- # last_response and history are updated within for_stream
- # Return the final aggregated response dict or raw string
- return full_response_text if raw else self.last_response
-
-
- # Since the API endpoint suggests streaming, always call the stream generator.
- # The non-stream wrapper will handle aggregation if stream=False.
+ if not full_response:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+ return full_response if raw else self.last_response
 return for_stream() if stream else for_non_stream()
 
 def chat(
@@ -197,29 +177,31 @@ class LLMChat(Provider):
 stream: bool = False,
 optimizer: str = None,
 conversationally: bool = False,
+ raw: bool = False
 ) -> Union[str, Generator[str, None, None]]:
- """Generate response with logging capabilities"""
-
+ """Generate response with logging capabilities and raw output support"""
 def for_stream_chat():
- # ask() yields dicts or strings when streaming
 gen = self.ask(
- prompt, stream=True, raw=False, # Ensure ask yields dicts
+ prompt, stream=True, raw=raw,
 optimizer=optimizer, conversationally=conversationally
 )
- for response_dict in gen:
- yield self.get_message(response_dict) # get_message expects dict
-
+ for response in gen:
+ if raw:
+ yield response
+ else:
+ yield self.get_message(response)
 def for_non_stream_chat():
- # ask() returns dict or str when not streaming
 response_data = self.ask(
 prompt,
 stream=False,
- raw=False, # Ensure ask returns dict
+ raw=raw,
 optimizer=optimizer,
- conversationally=conversationally,
+ conversationally=conversationally
 )
- return self.get_message(response_data) # get_message expects dict
-
+ if raw:
+ return response_data if isinstance(response_data, str) else self.get_message(response_data)
+ else:
+ return self.get_message(response_data)
 return for_stream_chat() if stream else for_non_stream_chat()
 
 def get_message(self, response: Dict[str, Any]) -> str:
@@ -228,31 +210,35 @@ class LLMChat(Provider):
 return response["text"]
 
 if __name__ == "__main__":
- # Ensure curl_cffi is installed
- print("-" * 80)
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
- print("-" * 80)
+ # # Ensure curl_cffi is installed
+ # print("-" * 80)
+ # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+ # print("-" * 80)
 
- # Test all available models
- working = 0
- total = len(LLMChat.AVAILABLE_MODELS)
+ # # Test all available models
+ # working = 0
+ # total = len(LLMChat.AVAILABLE_MODELS)
 
- for model in LLMChat.AVAILABLE_MODELS:
- try:
- test_ai = LLMChat(model=model, timeout=60)
- response = test_ai.chat("Say 'Hello' in one word", stream=True)
- response_text = ""
- for chunk in response:
- response_text += chunk
- print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+ # for model in LLMChat.AVAILABLE_MODELS:
+ # try:
+ # test_ai = LLMChat(model=model, timeout=60)
+ # response = test_ai.chat("Say 'Hello' in one word", stream=True)
+ # response_text = ""
+ # for chunk in response:
+ # response_text += chunk
+ # print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
 
- if response_text and len(response_text.strip()) > 0:
- status = "✓"
- # Truncate response if too long
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
- else:
- status = "✗"
- display_text = "Empty or invalid response"
- print(f"\r{model:<50} {status:<10} {display_text}")
- except Exception as e:
- print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ # if response_text and len(response_text.strip()) > 0:
+ # status = "✓"
+ # # Truncate response if too long
+ # display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+ # else:
+ # status = "✗"
+ # display_text = "Empty or invalid response"
+ # print(f"\r{model:<50} {status:<10} {display_text}")
+ # except Exception as e:
+ # print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ ai = LLMChat(model="@cf/meta/llama-3.1-70b-instruct")
+ response = ai.chat("Say 'Hello' in one word", stream=True, raw=False)
+ for chunk in response:
+ print(chunk, end="", flush=True)
```
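The LLMChat hunks above swap the manual SSE parsing for sanitize_stream (intro_value="data: ", to_json=True, skip_markers=["[DONE]"]) and thread a raw flag through chat() and ask(). A short usage sketch mirroring the new __main__ demo; the model id comes from the diff, while the import path is an assumption:

```python
# Sketch of the updated LLMChat call patterns, based on the hunks above.
# Assumption: `from webscout.Provider import LLMChat` resolves in this install.
from webscout.Provider import LLMChat

ai = LLMChat(model="@cf/meta/llama-3.1-70b-instruct", timeout=60)

# Non-streaming: chat() aggregates the stream and returns the final text
print(ai.chat("Say 'Hello' in one word"))

# Streaming with raw=True: chunks are yielded as produced by sanitize_stream,
# not wrapped in {"text": ...} dicts and not passed through get_message()
for chunk in ai.chat("Say 'Hello' in one word", stream=True, raw=True):
    print(chunk, end="", flush=True)
```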