webscout 8.2.5__py3-none-any.whl → 8.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (45)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Provider/AISEARCH/scira_search.py +2 -1
  5. webscout/Provider/GizAI.py +6 -4
  6. webscout/Provider/Nemotron.py +218 -0
  7. webscout/Provider/OPENAI/scirachat.py +2 -1
  8. webscout/Provider/TeachAnything.py +8 -5
  9. webscout/Provider/WiseCat.py +1 -1
  10. webscout/Provider/WrDoChat.py +370 -0
  11. webscout/Provider/__init__.py +4 -6
  12. webscout/Provider/ai4chat.py +5 -3
  13. webscout/Provider/akashgpt.py +59 -66
  14. webscout/Provider/freeaichat.py +57 -43
  15. webscout/Provider/scira_chat.py +2 -1
  16. webscout/Provider/scnet.py +4 -1
  17. webscout/__init__.py +0 -1
  18. webscout/conversation.py +305 -446
  19. webscout/swiftcli/__init__.py +80 -794
  20. webscout/swiftcli/core/__init__.py +7 -0
  21. webscout/swiftcli/core/cli.py +297 -0
  22. webscout/swiftcli/core/context.py +104 -0
  23. webscout/swiftcli/core/group.py +241 -0
  24. webscout/swiftcli/decorators/__init__.py +28 -0
  25. webscout/swiftcli/decorators/command.py +221 -0
  26. webscout/swiftcli/decorators/options.py +220 -0
  27. webscout/swiftcli/decorators/output.py +252 -0
  28. webscout/swiftcli/exceptions.py +21 -0
  29. webscout/swiftcli/plugins/__init__.py +9 -0
  30. webscout/swiftcli/plugins/base.py +135 -0
  31. webscout/swiftcli/plugins/manager.py +262 -0
  32. webscout/swiftcli/utils/__init__.py +59 -0
  33. webscout/swiftcli/utils/formatting.py +252 -0
  34. webscout/swiftcli/utils/parsing.py +267 -0
  35. webscout/version.py +1 -1
  36. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
  37. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
  38. webscout/LLM.py +0 -442
  39. webscout/Provider/PizzaGPT.py +0 -228
  40. webscout/Provider/promptrefine.py +0 -193
  41. webscout/Provider/tutorai.py +0 -270
  42. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
  43. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
  45. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
webscout/Provider/WrDoChat.py
@@ -0,0 +1,370 @@
+ import json
+ import re
+ from typing import Optional, Union, Any, Dict, Generator
+ from datetime import datetime
+ from uuid import uuid4
+ from curl_cffi import CurlError
+ from curl_cffi.requests import Session
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class WrDoChat(Provider):
+     """
+     A class to interact with the oi.wr.do chat API.
+
+     Attributes:
+         system_prompt (str): The system prompt to define the assistant's role.
+         model (str): The model to use for chat completion.
+
+     Examples:
+         >>> from webscout.Provider.api_request import WrDoChat
+         >>> ai = WrDoChat(cookies_path="cookies.json")
+         >>> response = ai.chat("What's the weather today?")
+         >>> print(response)
+     """
+
+     AVAILABLE_MODELS = [
+         "deepseek-chat-v3-0324",
+         "deepseek-r1",
+         "deepseek-r1-distill",
+         "gemini-1.5-flash",
+         "gemini-2.0-flash-exp",
+         "gemini-2.5-flash-preview-04-17",
+         "gemini-2.5-pro-exp-03-25",
+         "gemma2-9b-it",
+         "gpt-4.1-mini",
+         "gpt-4.1-nano",
+         "gpt-4o-mini",
+         "grok-2-1212",
+         "grok-3-mini",
+         "llama-3.1-8b-instant",
+         "llama-3.3-70b-versatile",
+         "llama-4-maverick-17b",
+         "llama3-70b-8192",
+         "mai-ds-r1",
+         "qwen-qwq-32b",
+         "qwen3-30b-a3b"
+     ]
+
+     def __init__(
+         self,
+         cookies_path: str,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini-2.5-flash-preview-04-17",
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """
+         Initialize the WrDoChat client.
+
+         Args:
+             cookies_path (str): Path to the cookies JSON file for authentication.
+             is_conversation (bool): Whether to maintain conversation history.
+             max_tokens (int): Maximum number of tokens to generate.
+             timeout (int): Request timeout in seconds.
+             intro (str): Introduction message for conversation.
+             filepath (str): Path to save conversation history.
+             update_file (bool): Whether to update conversation history file.
+             proxies (dict): Proxy configuration for requests.
+             history_offset (int): Offset for conversation history.
+             act (str): Role/act for the conversation.
+             model (str): Model to use for completion.
+             system_prompt (str): System prompt for the assistant.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.api_endpoint = "https://oi.wr.do/api/chat"
+         self.cookies_path = cookies_path
+         self.cookies = self._load_cookies()
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://oi.wr.do",
+             "user-agent": self.agent.random(),
+             "x-requested-with": "XMLHttpRequest"
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         # Update session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         # Apply cookies to session
+         if self.cookies:
+             for name, value in self.cookies.items():
+                 self.session.cookies.set(name, value, domain="oi.wr.do")
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.last_message_id = None # Store the last message ID from the API
+
+     def _load_cookies(self) -> Optional[Dict[str, str]]:
+         """Load cookies from a JSON file and return them as a dictionary."""
+         try:
+             with open(self.cookies_path, 'r') as f:
+                 cookies_data = json.load(f)
+             return {cookie['name']: cookie['value'] for cookie in cookies_data if 'name' in cookie and 'value' in cookie}
+         except Exception as e:
+             raise exceptions.AuthenticationError(f"Failed to load cookies: {str(e)}")
+
+     def _wrdo_extractor(self, line: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from the oi.wr.do stream format.
+
+         Format:
+             f:{"messageId":"..."}
+             0:"content chunk"
+             e:{"finishReason":"stop",...}
+             d:{"finishReason":"stop",...}
+         """
+         if isinstance(line, str):
+             # Handle content chunks that start with "0:"
+             match = re.search(r'0:"(.*?)"', line)
+             if match:
+                 # Decode potential unicode escapes like \u00e9
+                 content = match.group(1).encode().decode('unicode_escape')
+                 return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+
+             # Store message ID from 'f:' response
+             elif line.startswith('f:'):
+                 try:
+                     msg_data = json.loads(line[2:]) # Skip 'f:' prefix
+                     self.last_message_id = msg_data.get('messageId')
+                 except json.JSONDecodeError:
+                     pass
+             # Check for error messages in 'e:' response
+             elif line.startswith('e:'):
+                 try:
+                     error_data = json.loads(line[2:]) # Skip 'e:' prefix
+                     if error_data.get('error'):
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"API Error: {error_data['error']}"
+                         )
+                 except json.JSONDecodeError:
+                     pass
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
+         """
+         Send a message to the oi.wr.do API.
+
+         Args:
+             prompt (str): The prompt to send.
+             stream (bool): Whether to stream the response.
+             raw (bool): Whether to return raw response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to use conversation context.
+
+         Returns:
+             Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]: The API response.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         chat_id = str(uuid4())
+         message_id = str(uuid4())
+         current_time = datetime.utcnow().isoformat() + "Z"
+
+         payload = {
+             "id": chat_id,
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": self.system_prompt
+                 },
+                 {
+                     "id": message_id,
+                     "createdAt": current_time,
+                     "role": "user",
+                     "content": conversation_prompt,
+                     "parts": [
+                         {
+                             "type": "text",
+                             "text": conversation_prompt
+                         }
+                     ]
+                 }
+             ],
+             "selectedChatModel": self.model
+         }
+
+         def for_stream():
+             try:
+                 self.headers["referer"] = f"https://oi.wr.do/chat/{chat_id}"
+
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+
+                 if response.status_code == 401:
+                     raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
+
+                 response.raise_for_status()
+
+                 streaming_response = ""
+                 has_content = False
+
+                 # Use sanitize_stream with the custom extractor
+                 processed_stream = sanitize_stream(
+                     data=response.iter_lines(),
+                     intro_value=None, # No intro to remove
+                     to_json=False, # Response is not JSON
+                     content_extractor=self._wrdo_extractor,
+                     yield_raw_on_error=False
+                 )
+
+                 for content in processed_stream:
+                     if content and isinstance(content, str):
+                         streaming_response += content
+                         has_content = True
+                         yield {"text": content} if not raw else content
+
+                 # Only update conversation history if we received content
+                 if has_content:
+                     self.last_response = {"text": streaming_response}
+                     self.conversation.update_chat_history(
+                         prompt, self.get_message(self.last_response)
+                     )
+                 else:
+                     raise exceptions.FailedToGenerateResponseError(
+                         "No content received from API"
+                     )
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {str(e)}")
+
+         def for_non_stream():
+             response_text = ""
+             try:
+                 for chunk in for_stream():
+                     if isinstance(chunk, dict) and "text" in chunk:
+                         response_text += chunk["text"]
+                     elif raw and isinstance(chunk, str):
+                         response_text += chunk
+             except Exception as e:
+                 if not response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
+
+             return response_text if raw else {"text": response_text}
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Generate a response to a prompt.
+
+         Args:
+             prompt (str): The prompt to send.
+             stream (bool): Whether to stream the response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to use conversation context.
+
+         Returns:
+             Union[str, Generator[str, None, None]]: The generated response.
+         """
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """
+         Extract message from response.
+
+         Args:
+             response (dict): The response dictionary.
+
+         Returns:
+             str: The extracted message.
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+
+ if __name__ == "__main__":
+     from rich import print
+     import json
+
+     # Example usage
+     ai = WrDoChat(cookies_path="cookies.json")
+     response = ai.chat("write me a poem about AI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
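
For orientation, a minimal usage sketch for the new WrDoChat provider shown above; the cookies path and model name are placeholders, and the import path relies on the export added to webscout/Provider/__init__.py in this release.

# Hypothetical usage sketch (not part of the diff).
# "cookies.json" is a placeholder; _load_cookies() expects a JSON list of
# {"name": ..., "value": ...} entries exported from an authenticated oi.wr.do session.
from webscout.Provider import WrDoChat

ai = WrDoChat(cookies_path="cookies.json", model="gpt-4o-mini")

# Non-streaming: chat() returns the full reply as a string.
print(ai.chat("Summarize webscout in one sentence."))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Write a haiku about HTTP.", stream=True):
    print(chunk, end="", flush=True)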
webscout/Provider/__init__.py
@@ -16,7 +16,6 @@ from .typefully import *
  from .cleeai import *
  from .OLLAMA import OLLAMA
  from .Andi import AndiSearch
- from .PizzaGPT import *
  from .Llama3 import *
  from .koala import *
  from .meta import *
@@ -34,8 +33,6 @@ from .geminiapi import *
  from .elmo import *
  from .GPTWeb import *
  from .Netwrck import Netwrck
- from .promptrefine import *
- from .tutorai import *
  from .bagoodex import *
  from .aimathgpt import *
  from .geminiprorealtime import *
@@ -87,8 +84,12 @@ from .MCPCore import MCPCore
  from .TypliAI import TypliAI
  from .ChatSandbox import ChatSandbox
  from .GizAI import GizAI
+ from .WrDoChat import WrDoChat
+ from .Nemotron import NEMOTRON
  __all__ = [
      'SCNet',
+     'NEMOTRON',
+     'WrDoChat',
      'GizAI',
      'ChatSandbox',
      'SciraAI',
@@ -125,7 +126,6 @@ __all__ = [
      'AI4Chat',
      'OLLAMA',
      'AndiSearch',
-     'PIZZAGPT',
      'Sambanova',
      'KOALA',
      'Meta',
@@ -148,8 +148,6 @@ __all__ = [
      'Free2GPT',
      'GPTWeb',
      'Netwrck',
-     'PromptRefine',
-     'TutorAI',
      'Bagoodex',
      'AIMathGPT',
      'GeminiPro',
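
A short sketch of what the export changes above mean for callers, assuming a standard install of 8.2.6:

# The new providers are importable from the package namespace, while the
# removed ones (PizzaGPT, PromptRefine, TutorAI) are gone in this release.
from webscout.Provider import WrDoChat, NEMOTRON   # added in 8.2.6
# from webscout.Provider import PIZZAGPT            # no longer available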
webscout/Provider/ai4chat.py
@@ -182,16 +182,18 @@ class AI4Chat(Provider):
              )
          )

-     def get_message(self, response: dict) -> str:
+     def get_message(self, response: Union[dict, str]) -> str:
          """Retrieves message only from response

          Args:
-             response (dict): Response generated by `self.ask`
+             response (Union[dict, str]): Response generated by `self.ask`

          Returns:
              str: Message extracted
          """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
+         if isinstance(response, str):
+             return response.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+         assert isinstance(response, dict), "Response should be either dict or str"
          return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

  if __name__ == "__main__":
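
The hunk above widens AI4Chat.get_message to accept either a dict or a plain string; a minimal sketch of the new behaviour, assuming AI4Chat can be instantiated with its default arguments:

# Illustrative only: both input shapes now resolve to the same unescaped text.
from webscout.Provider import AI4Chat

ai = AI4Chat()
print(ai.get_message({"text": "Hello\\nworld"}))  # dict input, as before
print(ai.get_message("Hello\\nworld"))            # str input, newly accepted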
webscout/Provider/akashgpt.py
@@ -1,4 +1,4 @@
- from typing import Union, Any, Dict, Generator
+ from typing import Optional, Union, Any, Dict, Generator
  from uuid import uuid4
  import cloudscraper
  import re
@@ -7,7 +7,7 @@ import time

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent
@@ -29,22 +29,20 @@ class AkashGPT(Provider):
      """

      AVAILABLE_MODELS = [
-         "meta-llama-3-3-70b-instruct",
-         "deepseek-r1",
-         "meta-llama-3-1-405b-instruct-fp8",
-         "meta-llama-llama-4-maverick-17b-128e-instruct-fp8",
-         "nvidia-llama-3-3-nemotron-super-49b-v1",
-
-         # "meta-llama-3-2-3b-instruct",
-         # "meta-llama-3-1-8b-instruct-fp8",
-         # "mistral",
-         # "nous-hermes2-mixtral",
-         # "dolphin-mixtral",
-         "qwen-qwq-32b"
+         "Qwen3-235B-A22B-FP8",
+         "meta-llama-Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "nvidia-Llama-3-3-Nemotron-Super-49B-v1",
+         "Qwen-QwQ-32B",
+         "Meta-Llama-3-3-70B-Instruct",
+         "DeepSeek-R1",
+         "AkashGen"
+
+
      ]

      def __init__(
          self,
+         api_key: str,
          is_conversation: bool = True,
          max_tokens: int = 600,
          timeout: int = 30,
@@ -58,12 +56,12 @@ class AkashGPT(Provider):
          model: str = "meta-llama-3-3-70b-instruct",
          temperature: float = 0.6,
          top_p: float = 0.9,
-         session_token: str = None
      ):
          """
          Initializes the AkashGPT API with given parameters.

          Args:
+             api_key (str): Session token (used as API key here) for authentication. If None, auto-generates one.
              is_conversation (bool): Whether the provider is in conversation mode.
              max_tokens (int): Maximum number of tokens to sample.
              timeout (int): Timeout for API requests.
@@ -77,7 +75,6 @@ class AkashGPT(Provider):
              model (str): The model to use for generation.
              temperature (float): Controls randomness in generation.
              top_p (float): Controls diversity via nucleus sampling.
-             session_token (str): Session token for authentication. If None, auto-generates one.
          """
          # Validate model choice
          if model not in self.AVAILABLE_MODELS:
@@ -95,10 +92,10 @@
          self.top_p = top_p

          # Generate session token if not provided
-         if not session_token:
-             self.session_token = str(uuid4()).replace("-", "") + str(int(time.time()))
+         if not api_key:
+             self.api_key = str(uuid4()).replace("-", "") + str(int(time.time()))
          else:
-             self.session_token = session_token
+             self.api_key = api_key

          self.agent = LitAgent()

@@ -126,7 +123,7 @@
          }

          # Set cookies with the session token
-         self.session.cookies.set("session_token", self.session_token, domain="chat.akash.network")
+         self.session.cookies.set("session_token", self.api_key, domain="chat.akash.network")

          self.__available_optimizers = (
              method
@@ -147,6 +144,17 @@
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies

+     @staticmethod
+     def _akash_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from the AkashGPT stream format '0:"..."'."""
+         if isinstance(chunk, str):
+             match = re.search(r'0:"(.*?)"', chunk)
+             if match:
+                 # Decode potential unicode escapes like \u00e9
+                 content = match.group(1).encode().decode('unicode_escape')
+                 return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+         return None
+
      def ask(
          self,
          prompt: str,
@@ -198,52 +206,37 @@
          }

          def for_stream():
-             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     headers=self.headers,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout
                  )
-
-             streaming_response = ""
-             message_id = None
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-
-                 # Parse message ID from the f: line
-                 if line.startswith('f:'):
-                     try:
-                         f_data = json.loads(line[2:])
-                         message_id = f_data.get("messageId")
-                         continue
-                     except json.JSONDecodeError:
-                         pass
-
-                 # Parse content chunks
-                 if line.startswith('0:'):
-                     try:
-                         content = line[2:]
-                         # Remove surrounding quotes if they exist
-                         if content.startswith('"') and content.endswith('"'):
-                             content = content[1:-1]
-                         streaming_response += content
-                         yield content if raw else dict(text=content)
-                     except Exception as e:
-                         continue
-
-                 # End of stream markers
-                 if line.startswith('e:') or line.startswith('d:'):
-                     try:
-                         finish_data = json.loads(line[2:])
-                         finish_reason = finish_data.get("finishReason", "stop")
-                         # Could store usage data if needed:
-                         # usage = finish_data.get("usage", {})
-                     except json.JSONDecodeError:
-                         pass
-                     break
-
-             self.last_response.update(dict(text=streaming_response, message_id=message_id))
+                 if not response.ok:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                     )
+
+                 streaming_response = ""
+                 # Use sanitize_stream with the custom extractor
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value=None, # No simple prefix
+                     to_json=False, # Content is not JSON, handled by extractor
+                     content_extractor=self._akash_extractor, # Use the specific extractor
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_response += content_chunk
+                         yield content_chunk if raw else dict(text=content_chunk)
+
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}")
+
+             self.last_response.update(dict(text=streaming_response)) # message_id is not easily accessible with this stream format
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
              )
@@ -326,7 +319,7 @@ if __name__ == "__main__":

      for model in AkashGPT.AVAILABLE_MODELS:
          try:
-             test_ai = AkashGPT(model=model, timeout=60)
+             test_ai = AkashGPT(model=model, timeout=60, api_key="240f96202f87570d9d16c85a148ebdb1ea49d69557b73839a1658970c6d092a4") # Example key
              response = test_ai.chat("Say 'Hello' in one word")
              response_text = response
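
Taken together, the akashgpt.py hunks rename the constructor's session_token parameter to a required api_key (still sent as the session_token cookie) and refresh AVAILABLE_MODELS; a migration sketch, with the token and model values as placeholders:

# 8.2.5 style (no longer accepted):
#   ai = AkashGPT(session_token="<akash session token>")
#
# 8.2.6 style: api_key is now the first constructor argument. The default model
# string shown in the hunk ("meta-llama-3-3-70b-instruct") does not appear in the
# new AVAILABLE_MODELS list, so passing a model explicitly appears necessary to
# get past the model-validation check.
from webscout.Provider.akashgpt import AkashGPT

ai = AkashGPT(api_key="<akash session token>", model="Meta-Llama-3-3-70B-Instruct")
print(ai.chat("Say 'Hello' in one word"))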