webscout 8.2.4__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

Files changed (80)
  1. webscout/Extra/gguf.py +2 -0
  2. webscout/Provider/AISEARCH/scira_search.py +2 -5
  3. webscout/Provider/Aitopia.py +75 -51
  4. webscout/Provider/AllenAI.py +64 -67
  5. webscout/Provider/ChatGPTClone.py +33 -34
  6. webscout/Provider/ChatSandbox.py +342 -0
  7. webscout/Provider/Cloudflare.py +79 -32
  8. webscout/Provider/Deepinfra.py +69 -56
  9. webscout/Provider/ElectronHub.py +48 -39
  10. webscout/Provider/ExaChat.py +36 -20
  11. webscout/Provider/GPTWeb.py +24 -18
  12. webscout/Provider/GithubChat.py +52 -49
  13. webscout/Provider/GizAI.py +283 -0
  14. webscout/Provider/Glider.py +39 -28
  15. webscout/Provider/Groq.py +48 -20
  16. webscout/Provider/HeckAI.py +18 -36
  17. webscout/Provider/Jadve.py +30 -37
  18. webscout/Provider/LambdaChat.py +36 -59
  19. webscout/Provider/MCPCore.py +18 -21
  20. webscout/Provider/Marcus.py +23 -14
  21. webscout/Provider/Netwrck.py +35 -26
  22. webscout/Provider/OPENAI/__init__.py +1 -1
  23. webscout/Provider/OPENAI/exachat.py +4 -0
  24. webscout/Provider/OPENAI/scirachat.py +2 -4
  25. webscout/Provider/OPENAI/textpollinations.py +20 -22
  26. webscout/Provider/OPENAI/toolbaz.py +1 -0
  27. webscout/Provider/PI.py +22 -13
  28. webscout/Provider/StandardInput.py +42 -30
  29. webscout/Provider/TeachAnything.py +16 -7
  30. webscout/Provider/TextPollinationsAI.py +78 -76
  31. webscout/Provider/TwoAI.py +120 -88
  32. webscout/Provider/TypliAI.py +305 -0
  33. webscout/Provider/Venice.py +24 -22
  34. webscout/Provider/VercelAI.py +31 -12
  35. webscout/Provider/__init__.py +7 -7
  36. webscout/Provider/asksteve.py +53 -44
  37. webscout/Provider/cerebras.py +77 -31
  38. webscout/Provider/chatglm.py +47 -37
  39. webscout/Provider/elmo.py +38 -32
  40. webscout/Provider/granite.py +24 -21
  41. webscout/Provider/hermes.py +27 -20
  42. webscout/Provider/learnfastai.py +25 -20
  43. webscout/Provider/llmchatco.py +48 -78
  44. webscout/Provider/multichat.py +13 -3
  45. webscout/Provider/scira_chat.py +49 -30
  46. webscout/Provider/scnet.py +23 -20
  47. webscout/Provider/searchchat.py +16 -24
  48. webscout/Provider/sonus.py +37 -39
  49. webscout/Provider/toolbaz.py +24 -46
  50. webscout/Provider/turboseek.py +37 -41
  51. webscout/Provider/typefully.py +30 -22
  52. webscout/Provider/typegpt.py +47 -51
  53. webscout/Provider/uncovr.py +46 -40
  54. webscout/cli.py +256 -0
  55. webscout/conversation.py +0 -2
  56. webscout/exceptions.py +3 -0
  57. webscout/version.py +1 -1
  58. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/METADATA +166 -45
  59. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/RECORD +63 -76
  60. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  61. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  62. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  63. inferno/__init__.py +0 -6
  64. inferno/__main__.py +0 -9
  65. inferno/cli.py +0 -6
  66. inferno/lol.py +0 -589
  67. webscout/Local/__init__.py +0 -12
  68. webscout/Local/__main__.py +0 -9
  69. webscout/Local/api.py +0 -576
  70. webscout/Local/cli.py +0 -516
  71. webscout/Local/config.py +0 -75
  72. webscout/Local/llm.py +0 -287
  73. webscout/Local/model_manager.py +0 -253
  74. webscout/Local/server.py +0 -721
  75. webscout/Local/utils.py +0 -93
  76. webscout/Provider/Chatify.py +0 -175
  77. webscout/Provider/askmyai.py +0 -158
  78. webscout/Provider/gaurish.py +0 -244
  79. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  80. {webscout-8.2.4.dist-info → webscout-8.2.5.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/Venice.py

@@ -1,13 +1,13 @@
 from curl_cffi import CurlError
 from curl_cffi.requests import Session # Import Session
 import json
-from typing import Generator, Dict, Any, List, Union
+from typing import Generator, Dict, Any, List, Optional, Union
 from uuid import uuid4
 import random

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -104,6 +104,14 @@ class Venice(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _venice_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Venice stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get("kind") == "content":
+            return chunk.get("content")
+        return None
+
+
     def ask(
         self,
         prompt: str,
@@ -158,26 +166,20 @@ class Venice(Provider):
                 )

             streaming_text = ""
-            # Iterate over bytes and decode manually
-            for line in response.iter_lines(): # Removed decode_unicode
-                if not line:
-                    continue
-
-                try:
-                    # Decode bytes to string
-                    line_data = line.decode('utf-8').strip()
-                    if '"kind":"content"' in line_data:
-                        data = json.loads(line_data)
-                        if 'content' in data:
-                            content = data['content']
-                            streaming_text += content
-                            resp = dict(text=content)
-                            # Yield content or dict based on raw flag
-                            yield content if raw else resp
-                except json.JSONDecodeError:
-                    continue
-                except UnicodeDecodeError:
-                    continue
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=True, # Each line is JSON
+                content_extractor=self._venice_extractor, # Use the specific extractor
+                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _venice_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield content_chunk if raw else dict(text=content_chunk)

             # Update history and last response after stream finishes
             self.conversation.update_chat_history(prompt, streaming_text)
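
The pattern introduced here (and reused by the other providers below) is a per-provider content extractor plugged into the shared sanitize_stream helper. A minimal, self-contained sketch of how the extractor behaves on Venice-style events — sanitize_stream_sketch below is a simplified stand-in written for illustration, not webscout's actual implementation:

import json
from typing import Any, Dict, Iterable, Iterator, Optional, Union

def venice_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Mirrors Venice._venice_extractor from the diff: only "content" events carry text.
    if isinstance(chunk, dict) and chunk.get("kind") == "content":
        return chunk.get("content")
    return None

def sanitize_stream_sketch(lines: Iterable[bytes]) -> Iterator[str]:
    # Simplified stand-in for webscout.AIutel.sanitize_stream with
    # to_json=True and yield_raw_on_error=False: parse each line as JSON,
    # run the extractor, and silently skip anything that fails.
    for raw in lines:
        try:
            obj = json.loads(raw.decode("utf-8").strip())
        except (UnicodeDecodeError, json.JSONDecodeError):
            continue
        text = venice_extractor(obj)
        if text:
            yield text

sample = [
    b'{"kind":"content","content":"Hello"}',
    b'{"kind":"meta","model":"venice"}',      # dropped by the extractor
    b'{"kind":"content","content":" world"}',
]
print("".join(sanitize_stream_sketch(sample)))  # -> Hello world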
webscout/Provider/VercelAI.py

@@ -7,7 +7,7 @@ import uuid

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -111,6 +111,17 @@ class VercelAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _vercelai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the VercelAI stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -153,15 +164,21 @@ class VercelAI(Provider):
                 error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 raise exceptions.FailedToGenerateResponseError(error_msg)

-            streaming_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        streaming_response += content
-                        yield content if raw else dict(text=content)
-            self.last_response.update(dict(text=streaming_response))
+            streaming_text = ""
+            # Use sanitize_stream with the custom extractor
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # No simple prefix
+                to_json=False, # Content is not JSON
+                content_extractor=self._vercelai_extractor # Use the specific extractor
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    streaming_text += content_chunk
+                    yield content_chunk if raw else dict(text=content_chunk)
+
+            self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
@@ -201,8 +218,10 @@ class VercelAI(Provider):

     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Formatting is handled by the extractor now
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement if needed

 if __name__ == "__main__":
     print("-" * 80)
webscout/Provider/__init__.py

@@ -28,7 +28,6 @@ from .turboseek import *
 from .Free2GPT import *
 from .TeachAnything import *
 from .AI21 import *
-from .Chatify import *
 from .x0gpt import *
 from .cerebras import *
 from .geminiapi import *
@@ -39,12 +38,10 @@ from .promptrefine import *
 from .tutorai import *
 from .bagoodex import *
 from .aimathgpt import *
-from .gaurish import *
 from .geminiprorealtime import *
 from .llmchat import *
 from .llmchatco import LLMChatCo # Add new LLMChat.co provider
 from .talkai import *
-from .askmyai import *
 from .llama3mitril import *
 from .Marcus import *
 from .typegpt import *
@@ -87,8 +84,13 @@ from .toolbaz import Toolbaz
 from .scnet import SCNet
 from .WritingMate import WritingMate
 from .MCPCore import MCPCore
+from .TypliAI import TypliAI
+from .ChatSandbox import ChatSandbox
+from .GizAI import GizAI
 __all__ = [
     'SCNet',
+    'GizAI',
+    'ChatSandbox',
     'SciraAI',
     'StandardInputAI',
     'OpenGPT',
@@ -127,7 +129,6 @@ __all__ = [
     'Sambanova',
     'KOALA',
     'Meta',
-    'AskMyAI',
     'PiAI',
     'Julius',
     'YouChat',
@@ -136,7 +137,6 @@ __all__ = [
     'TurboSeek',
     'TeachAnything',
     'AI21',
-    'Chatify',
     'X0GPT',
     'Cerebras',
     'GEMINIAPI',
@@ -152,7 +152,6 @@ __all__ = [
     'TutorAI',
     'Bagoodex',
     'AIMathGPT',
-    'GaurishCerebras',
     'GeminiPro',
     'LLMChat',
     'LLMChatCo',
@@ -176,5 +175,6 @@ __all__ = [
     'SearchChatAI',
     'Writecream',
     'Toolbaz',
-    'MCPCore'
+    'MCPCore',
+    'TypliAI',
 ]
webscout/Provider/asksteve.py

@@ -1,8 +1,11 @@
-import requests
+from typing import Any, Dict, Optional, Union
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
 from webscout.litagent import LitAgent

 class AskSteve(Provider):
@@ -36,7 +39,7 @@ class AskSteve(Provider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
         """
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
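
The requests-to-curl_cffi swap matters because curl_cffi sessions can impersonate a real browser's TLS/HTTP fingerprint, which several of these endpoints appear to require; the session later passes impersonate per request (see the next hunk). A minimal sketch of that usage pattern, with httpbin.org standing in for the real endpoint:

from curl_cffi.requests import Session

session = Session()
# Per-request impersonation, as adopted throughout this release: curl_cffi
# sends a Chrome 120-like TLS fingerprint and matching headers for this call.
resp = session.get(
    "https://httpbin.org/headers",  # placeholder endpoint, not the AskSteve API
    impersonate="chrome120",
    timeout=30,
)
print(resp.status_code)
print(resp.json()["headers"].get("User-Agent"))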
@@ -73,7 +76,15 @@ class AskSteve(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
+    @staticmethod
+    def _asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from AskSteve JSON response."""
+        if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
+            parts = chunk["candidates"][0].get("content", {}).get("parts", [])
+            if parts and isinstance(parts[0].get("text"), str):
+                return parts[0]["text"]
+        return None

     def ask(
         self,
@@ -115,37 +126,43 @@ class AskSteve(Provider):
             "prompt": conversation_prompt
         }

-        def for_stream():
+
+        # This API doesn't stream, so we process the full response
+        try:
             response = self.session.post(
                 self.api_endpoint,
                 headers=self.headers,
                 json=payload,
-                stream=True,
+                stream=False, # API doesn't stream
                 timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
             )
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            response_data = response.json()
-            if "candidates" in response_data and len(response_data["candidates"]) > 0:
-                text = response_data["candidates"][0]["content"]["parts"][0]["text"]
-                self.last_response.update(dict(text=text))
-                yield dict(text=text) if not raw else text
-            else:
-                raise Exception("No response generated")
+            response.raise_for_status()
+            response_text_raw = response.text # Get raw text
+
+            # Process the full JSON text using sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                to_json=True, # Parse the whole text as JSON
+                intro_value=None,
+                content_extractor=self._asksteve_extractor, # Use the specific extractor
+                yield_raw_on_error=False
+            )
+            # Extract the single result
+            text = next(processed_stream, None)
+            text = text if isinstance(text, str) else "" # Ensure it's a string

+            self.last_response.update(dict(text=text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
+            # Return dict or raw string based on raw flag
+            return text if raw else self.last_response

-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+        except Exception as e: # Catch other potential errors
+            raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}") from e

     def chat(
         self,
@@ -164,23 +181,15 @@ class AskSteve(Provider):
             str: Response generated
         """

-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
+        # Since ask() doesn't truly stream, we just call it once.
+        response_data = self.ask(
+            prompt,
+            stream=False, # Always False for this API
+            raw=False, # Get the dict back
+            optimizer=optimizer,
+            conversationally=conversationally,
+        )
+        return self.get_message(response_data)

     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -192,12 +201,12 @@ class AskSteve(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response.get("text", "") # Use .get for safety


 if __name__ == "__main__":
     from rich import print
     ai = AskSteve()
-    response = ai.chat("hi", stream=True)
+    response = ai.chat("write a short poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
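
The _asksteve_extractor walks a Gemini-style candidates → content → parts payload down to its text. A standalone check against an assumed payload shape (the sample body is hypothetical, inferred from the extractor itself):

import json
from typing import Any, Dict, Optional, Union

def asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Mirrors AskSteve._asksteve_extractor from the diff.
    if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
        parts = chunk["candidates"][0].get("content", {}).get("parts", [])
        if parts and isinstance(parts[0].get("text"), str):
            return parts[0]["text"]
    return None

body = '{"candidates": [{"content": {"parts": [{"text": "Hello from AskSteve"}]}}]}'
print(asksteve_extractor(json.loads(body)))   # -> Hello from AskSteve
print(asksteve_extractor({"candidates": []})) # -> None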
webscout/Provider/cerebras.py

@@ -1,10 +1,11 @@

 import re
-import requests
+import curl_cffi
+from curl_cffi.requests import Session
 import json
 import os
 from typing import Any, Dict, Optional, Generator, List, Union
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as UserAgent
@@ -17,7 +18,9 @@ class Cerebras(Provider):
     AVAILABLE_MODELS = [
         "llama3.1-8b",
         "llama-3.3-70b",
-        "deepseek-r1-distill-llama-70b"
+        "deepseek-r1-distill-llama-70b",
+        "llama-4-scout-17b-16e-instruct"
+
     ]

     def __init__(
@@ -49,6 +52,8 @@ class Cerebras(Provider):
         self.max_tokens_to_sample = max_tokens
         self.last_response = {}

+        self.session = Session() # Initialize curl_cffi session
+
         # Get API key first
         try:
             self.api_key = self.get_demo_api_key(cookie_path)
@@ -74,6 +79,9 @@ class Cerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+
+        # Apply proxies to the session
+        self.session.proxies = proxies

     # Rest of the class implementation remains the same...
     @staticmethod
@@ -88,7 +96,14 @@ class Cerebras(Provider):
         """Refines the input text by removing surrounding quotes."""
         return text.strip('"')

-    def get_demo_api_key(self, cookie_path: str) -> str:
+    @staticmethod
+    def _cerebras_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Cerebras stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def get_demo_api_key(self, cookie_path: str) -> str: # Keep this using requests or switch to curl_cffi
         """Retrieves the demo API key using the provided cookie."""
         try:
             with open(cookie_path, "r") as file:
@@ -114,17 +129,19 @@ class Cerebras(Provider):
         }

         try:
-            response = requests.post(
+            # Use the initialized curl_cffi session
+            response = self.session.post(
                 "https://inference.cerebras.ai/api/graphql",
                 cookies=cookies,
                 headers=headers,
                 json=json_data,
                 timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
             )
             response.raise_for_status()
-            api_key = response.json()["data"]["GetMyDemoApiKey"]
+            api_key = response.json().get("data", {}).get("GetMyDemoApiKey")
             return api_key
-        except requests.exceptions.RequestException as e:
+        except curl_cffi.CurlError as e:
             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
         except KeyError:
             raise exceptions.InvalidResponseError("API key not found in response.")
144
161
  }
145
162
 
146
163
  try:
147
- response = requests.post(
164
+ # Use the initialized curl_cffi session
165
+ response = self.session.post(
148
166
  "https://api.cerebras.ai/v1/chat/completions",
149
167
  headers=headers,
150
168
  json=payload,
151
169
  stream=stream,
152
- timeout=self.timeout
170
+ timeout=self.timeout,
171
+ impersonate="chrome120" # Add impersonate
153
172
  )
154
173
  response.raise_for_status()
155
174
 
156
175
  if stream:
157
176
  def generate_stream():
158
- for line in response.iter_lines():
159
- if line:
160
- line = line.decode('utf-8')
161
- if line.startswith('data:'):
162
- try:
163
- data = json.loads(line[6:])
164
- if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
165
- content = data['choices'][0]['delta']['content']
166
- yield content
167
- except json.JSONDecodeError:
168
- continue
177
+ # Use sanitize_stream
178
+ processed_stream = sanitize_stream(
179
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
180
+ intro_value="data:",
181
+ to_json=True, # Stream sends JSON
182
+ content_extractor=self._cerebras_extractor, # Use the specific extractor
183
+ yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
184
+ )
185
+ for content_chunk in processed_stream:
186
+ if content_chunk and isinstance(content_chunk, str):
187
+ yield content_chunk # Yield the extracted text chunk
169
188
 
170
189
  return generate_stream()
171
190
  else:
172
191
  response_json = response.json()
173
- return response_json['choices'][0]['message']['content']
192
+ # Extract content for non-streaming response
193
+ content = response_json.get("choices", [{}])[0].get("message", {}).get("content")
194
+ return content if content else "" # Return empty string if not found
174
195
 
175
- except requests.exceptions.RequestException as e:
196
+ except curl_cffi.CurlError as e:
197
+ raise exceptions.APIConnectionError(f"Request failed (CurlError): {e}") from e
198
+ except Exception as e: # Catch other potential errors
176
199
  raise exceptions.APIConnectionError(f"Request failed: {e}")
177
200
 
178
201
  def ask(
179
202
  self,
180
203
  prompt: str,
181
204
  stream: bool = False,
205
+ raw: bool = False, # Add raw parameter for consistency
182
206
  optimizer: str = None,
183
207
  conversationally: bool = False,
184
208
  ) -> Union[Dict, Generator]:
@@ -199,11 +223,23 @@ class Cerebras(Provider):

         try:
             response = self._make_request(messages, stream)
-            if stream:
-                return response

-            self.last_response = response
-            return response
+            if stream:
+                # Wrap the generator to yield dicts or raw strings
+                def stream_wrapper():
+                    full_text = ""
+                    for chunk in response:
+                        full_text += chunk
+                        yield chunk if raw else {"text": chunk}
+                    # Update history after stream finishes
+                    self.last_response = {"text": full_text}
+                    self.conversation.update_chat_history(prompt, full_text)
+                return stream_wrapper()
+            else:
+                # Non-streaming response is already the full text string
+                self.last_response = {"text": response}
+                self.conversation.update_chat_history(prompt, response)
+                return self.last_response if not raw else response # Return dict or raw string

         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
@@ -216,14 +252,24 @@ class Cerebras(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator]:
         """Chat with the model."""
-        response = self.ask(prompt, stream, optimizer, conversationally)
+        # Ask returns a generator for stream=True, dict/str for stream=False
+        response_gen_or_dict = self.ask(prompt, stream, raw=False, optimizer=optimizer, conversationally=conversationally)
+
         if stream:
-            return response
-        return response
+            # Wrap the generator from ask() to get message text
+            def stream_wrapper():
+                for chunk_dict in response_gen_or_dict:
+                    yield self.get_message(chunk_dict)
+            return stream_wrapper()
+        else:
+            # Non-streaming response is already a dict
+            return self.get_message(response_gen_or_dict)

     def get_message(self, response: str) -> str:
         """Retrieves message from response."""
-        return response
+        # Updated to handle dict input from ask()
+        assert isinstance(response, dict), "Response should be of dict data-type only for get_message"
+        return response.get("text", "")


 if __name__ == "__main__":
@@ -231,7 +277,7 @@ if __name__ == "__main__":

     # Example usage
     cerebras = Cerebras(
-        cookie_path='cookie.json',
+        cookie_path=r'cookies.json',
         model='llama3.1-8b',
         system_prompt="You are a helpful AI assistant."
     )
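
To make the new Cerebras streaming path concrete: sanitize_stream is configured with intro_value="data:" and the delta extractor, which amounts to classic SSE parsing of OpenAI-style chunks. A self-contained sketch under that reading — iter_sse_sketch is a simplified stand-in, not webscout's implementation:

import json
from typing import Any, Dict, Iterable, Iterator, Optional, Union

def cerebras_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Mirrors Cerebras._cerebras_extractor from the diff.
    if isinstance(chunk, dict):
        return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
    return None

def iter_sse_sketch(lines: Iterable[str]) -> Iterator[str]:
    # Simplified stand-in for sanitize_stream(intro_value="data:", to_json=True,
    # yield_raw_on_error=False): strip the SSE prefix, parse JSON, extract text.
    for line in lines:
        if not line.startswith("data:"):
            continue
        try:
            obj = json.loads(line[len("data:"):].strip())
        except json.JSONDecodeError:
            continue  # e.g. a terminal "data: [DONE]" sentinel
        text = cerebras_extractor(obj)
        if text:
            yield text

sample = [
    'data: {"choices":[{"delta":{"content":"Hel"}}]}',
    'data: {"choices":[{"delta":{"content":"lo"}}]}',
    'data: [DONE]',
]
print("".join(iter_sse_sketch(sample)))  # -> Hello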