webscout 7.6-py3-none-any.whl → 7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (36)
  1. webscout/Extra/autocoder/__init__.py +9 -9
  2. webscout/Extra/autocoder/autocoder_utiles.py +193 -195
  3. webscout/Extra/autocoder/rawdog.py +789 -649
  4. webscout/Extra/gguf.py +54 -24
  5. webscout/Provider/AISEARCH/ISou.py +0 -21
  6. webscout/Provider/AllenAI.py +4 -21
  7. webscout/Provider/ChatGPTClone.py +226 -0
  8. webscout/Provider/Glider.py +8 -4
  9. webscout/Provider/Hunyuan.py +272 -0
  10. webscout/Provider/LambdaChat.py +391 -0
  11. webscout/Provider/OLLAMA.py +256 -32
  12. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +18 -45
  13. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +34 -46
  14. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  15. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  16. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  17. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  18. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  19. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  20. webscout/Provider/WebSim.py +227 -0
  21. webscout/Provider/__init__.py +12 -1
  22. webscout/Provider/flowith.py +13 -2
  23. webscout/Provider/labyrinth.py +239 -0
  24. webscout/Provider/learnfastai.py +28 -15
  25. webscout/Provider/sonus.py +208 -0
  26. webscout/Provider/typegpt.py +1 -1
  27. webscout/Provider/uncovr.py +297 -0
  28. webscout/cli.py +49 -0
  29. webscout/litagent/agent.py +14 -9
  30. webscout/version.py +1 -1
  31. {webscout-7.6.dist-info → webscout-7.7.dist-info}/METADATA +33 -22
  32. {webscout-7.6.dist-info → webscout-7.7.dist-info}/RECORD +36 -29
  33. {webscout-7.6.dist-info → webscout-7.7.dist-info}/LICENSE.md +0 -0
  34. {webscout-7.6.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  35. {webscout-7.6.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  36. {webscout-7.6.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Extra/gguf.py CHANGED
@@ -17,7 +17,7 @@ import signal
  import tempfile
  import platform
  from pathlib import Path
- from typing import Optional, Dict, List
+ from typing import Optional, Dict, List, Any, Union, Literal, TypedDict, Set

  from huggingface_hub import HfApi
  from webscout.zeroart import figlet_format
@@ -32,10 +32,15 @@ class ConversionError(Exception):
      """Custom exception for when things don't go as planned! ⚠️"""
      pass

+ class QuantizationMethod(TypedDict):
+     """Type definition for quantization method descriptions."""
+     description: str
+
  class ModelConverter:
      """Handles the conversion of Hugging Face models to GGUF format."""

-     VALID_METHODS = {
+     VALID_METHODS: Dict[str, str] = {
+         "fp16": "16-bit floating point - maximum accuracy, largest size",
          "q2_k": "2-bit quantization (smallest size, lowest accuracy)",
          "q3_k_l": "3-bit quantization (large) - balanced for size/accuracy",
          "q3_k_m": "3-bit quantization (medium) - good balance for most use cases",
@@ -52,7 +57,7 @@ class ModelConverter:
          "q8_0": "8-bit quantization - maximum accuracy, largest size"
      }

-     VALID_IMATRIX_METHODS = {
+     VALID_IMATRIX_METHODS: Dict[str, str] = {
          "iq3_m": "3-bit imatrix quantization (medium) - balanced importance-based",
          "iq3_xxs": "3-bit imatrix quantization (extra extra small) - maximum compression",
          "q4_k_m": "4-bit imatrix quantization (medium) - balanced importance-based",
@@ -63,11 +68,18 @@ class ModelConverter:
          "q5_k_s": "5-bit imatrix quantization (small) - optimized for speed"
      }

-     def __init__(self, model_id: str, username: Optional[str] = None,
-                  token: Optional[str] = None, quantization_methods: str = "q4_k_m",
-                  use_imatrix: bool = False, train_data_file: Optional[str] = None,
-                  split_model: bool = False, split_max_tensors: int = 256,
-                  split_max_size: Optional[str] = None):
+     def __init__(
+         self,
+         model_id: str,
+         username: Optional[str] = None,
+         token: Optional[str] = None,
+         quantization_methods: str = "q4_k_m",
+         use_imatrix: bool = False,
+         train_data_file: Optional[str] = None,
+         split_model: bool = False,
+         split_max_tensors: int = 256,
+         split_max_size: Optional[str] = None
+     ) -> None:
          self.model_id = model_id
          self.username = username
          self.token = token
@@ -79,6 +91,7 @@ class ModelConverter:
          self.split_model = split_model
          self.split_max_tensors = split_max_tensors
          self.split_max_size = split_max_size
+         self.fp16_only = "fp16" in self.quantization_methods and len(self.quantization_methods) == 1

      def validate_inputs(self) -> None:
          """Validates all input parameters."""
@@ -117,7 +130,7 @@ class ModelConverter:
      @staticmethod
      def check_dependencies() -> Dict[str, bool]:
          """Check if all required dependencies are installed."""
-         dependencies = {
+         dependencies: Dict[str, str] = {
              'git': 'Git version control',
              'pip3': 'Python package installer',
              'huggingface-cli': 'Hugging Face CLI',
@@ -125,7 +138,7 @@ class ModelConverter:
              'ninja': 'Ninja build system (optional)'
          }

-         status = {}
+         status: Dict[str, bool] = {}
          for cmd, desc in dependencies.items():
              status[cmd] = subprocess.run(['which', cmd], capture_output=True, text=True).returncode == 0

@@ -133,7 +146,7 @@ class ModelConverter:

      def detect_hardware(self) -> Dict[str, bool]:
          """Detect available hardware acceleration."""
-         hardware = {
+         hardware: Dict[str, bool] = {
              'cuda': False,
              'metal': False,
              'opencl': False,
@@ -227,7 +240,7 @@ class ModelConverter:
              console.print(f" {'✓' if available else '✗'} {hw.upper()}")

          # Configure CMake build
-         cmake_args = ['cmake', '-B', 'build']
+         cmake_args: List[str] = ['cmake', '-B', 'build']

          # Add hardware acceleration options
          if hardware['cuda']:
@@ -277,7 +290,7 @@ class ModelConverter:

      def generate_importance_matrix(self, model_path: str, train_data_path: str, output_path: str) -> None:
          """Generates importance matrix for quantization."""
-         imatrix_command = [
+         imatrix_command: List[str] = [
              "./llama.cpp/build/bin/llama-imatrix",
              "-m", model_path,
              "-f", train_data_path,
@@ -310,7 +323,7 @@ class ModelConverter:

      def split_model(self, model_path: str, outdir: str) -> List[str]:
          """Splits the model into smaller chunks."""
-         split_cmd = [
+         split_cmd: List[str] = [
              "./llama.cpp/build/bin/llama-gguf-split",
              "--split",
          ]
@@ -521,9 +534,20 @@ This repository is licensed under the same terms as the original model.

          if result.returncode != 0:
              raise ConversionError(f"Error converting to fp16: {result.stderr}")
-
+
+         # If fp16_only is True, we're done after fp16 conversion
+         if self.fp16_only:
+             quantized_files = [f"{self.model_name}.fp16.gguf"]
+             if self.username and self.token:
+                 api.upload_file(
+                     path_or_fileobj=fp16,
+                     path_in_repo=f"{self.model_name}.fp16.gguf",
+                     repo_id=f"{self.username}/{self.model_name}-GGUF"
+                 )
+             return
+
          # Generate importance matrix if needed
-         imatrix_path = None
+         imatrix_path: Optional[str] = None
          if self.use_imatrix:
              train_data_path = self.train_data_file if self.train_data_file else "llama.cpp/groups_merged.txt"
              imatrix_path = str(Path(outdir)/"imatrix.dat")
@@ -531,7 +555,7 @@ This repository is licensed under the same terms as the original model.

          # Quantize model
          console.print("[bold green]Quantizing model...")
-         quantized_files = []
+         quantized_files: List[str] = []
          for method in self.quantization_methods:
              quantized_name = f"{self.model_name.lower()}-{method.lower()}"
              if self.use_imatrix:
@@ -539,7 +563,7 @@ This repository is licensed under the same terms as the original model.
              quantized_path = str(Path(outdir)/f"{quantized_name}.gguf")

              if self.use_imatrix:
-                 quantize_cmd = [
+                 quantize_cmd: List[str] = [
                      "./llama.cpp/build/bin/llama-quantize",
                      "--imatrix", imatrix_path,
                      fp16, quantized_path, method
@@ -600,11 +624,17 @@ app = CLI(
  @option("-s", "--split-model", help="Split the model into smaller chunks", is_flag=True)
  @option("--split-max-tensors", help="Maximum number of tensors per file when splitting", default=256)
  @option("--split-max-size", help="Maximum file size when splitting (e.g., '256M', '5G')", default=None)
- def convert_command(model_id: str, username: Optional[str] = None,
-                     token: Optional[str] = None, quantization: str = "q4_k_m",
-                     use_imatrix: bool = False, train_data: Optional[str] = None,
-                     split_model: bool = False, split_max_tensors: int = 256,
-                     split_max_size: Optional[str] = None):
+ def convert_command(
+     model_id: str,
+     username: Optional[str] = None,
+     token: Optional[str] = None,
+     quantization: str = "q4_k_m",
+     use_imatrix: bool = False,
+     train_data: Optional[str] = None,
+     split_model: bool = False,
+     split_max_tensors: int = 256,
+     split_max_size: Optional[str] = None
+ ) -> None:
      """
      Convert and quantize HuggingFace models to GGUF format! 🚀

@@ -644,7 +674,7 @@ def convert_command(model_id: str, username: Optional[str] = None,
          console.print(f"[red]Unexpected error: {str(e)}")
          sys.exit(1)

- def main():
+ def main() -> None:
      """Fire up the GGUF converter! 🚀"""
      app.run()

webscout/Provider/AISEARCH/ISou.py CHANGED
@@ -65,7 +65,6 @@ class Isou(AISearch):
          timeout: int = 120,
          proxies: Optional[dict] = None,
          model: str = "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-         logging: bool = False
      ):
          """Initialize the Isou API client.

@@ -73,7 +72,6 @@ class Isou(AISearch):
              timeout (int, optional): Request timeout in seconds. Defaults to 120.
              proxies (dict, optional): Proxy configuration for requests. Defaults to None.
              model (str, optional): Model to use for search. Defaults to DeepSeek-R1.
-             logging (bool, optional): Enable logging. Defaults to False.
          """
          self.available_models = [
              "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
@@ -115,24 +113,6 @@ class Isou(AISearch):
          self.session.headers.update(self.headers)
          self.proxies = proxies

-         # Initialize logger if enabled
-         if logging:
-             from webscout.Litlogger import Logger, LogFormat, ConsoleHandler
-             from webscout.Litlogger.core.level import LogLevel
-
-             console_handler = ConsoleHandler(
-                 level=LogLevel.DEBUG,
-             )
-
-             self.logger = Logger(
-                 name="Isou",
-                 level=LogLevel.DEBUG,
-                 handlers=[console_handler]
-             )
-             self.logger.info("Isou initialized successfully ✨")
-         else:
-             self.logger = None
-
      def search(
          self,
          prompt: str,
@@ -268,7 +248,6 @@ if __name__ == "__main__":
      # Initialize with specific model and logging
      ai = Isou(
          model="siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-         logging=False
      )

      response = ai.search(input(">>> "), stream=True, raw=False)

webscout/Provider/AllenAI.py CHANGED
@@ -37,7 +37,7 @@ class AllenAI(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "tulu3-405b",
-         system_prompt: str = "You are a helpful AI assistant.",
+
      ):
          """Initializes the AllenAI API client."""
          if model not in self.AVAILABLE_MODELS:
@@ -68,7 +68,6 @@ class AllenAI(Provider):
          self.session.headers.update(self.headers)
          self.session.proxies.update(proxies)
          self.model = model
-         self.system_prompt = system_prompt
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.timeout = timeout
@@ -95,19 +94,6 @@ class AllenAI(Provider):
          )
          self.conversation.history_offset = history_offset

-     def format_prompt(self, messages):
-         """Format messages into a prompt string"""
-         formatted = []
-         for msg in messages:
-             role = msg.get("role", "")
-             content = msg.get("content", "")
-             if role == "system":
-                 formatted.append(f"System: {content}")
-             elif role == "user":
-                 formatted.append(f"User: {content}")
-             elif role == "assistant":
-                 formatted.append(f"Assistant: {content}")
-         return "\n".join(formatted)

      def ask(
          self,
@@ -139,11 +125,8 @@ class AllenAI(Provider):
              "x-anonymous-user-id": self.x_anonymous_user_id
          })

-         # Format messages for AllenAI
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": conversation_prompt}
-         ]
+
+         messages = conversation_prompt

          # Build multipart form data
          form_data = [
@@ -154,7 +137,7 @@ class AllenAI(Provider):
              f'Content-Disposition: form-data; name="host"\r\n\r\n{host}\r\n',

              f'--{boundary}\r\n'
-             f'Content-Disposition: form-data; name="content"\r\n\r\n{self.format_prompt(messages)}\r\n',
+             f'Content-Disposition: form-data; name="content"\r\n\r\n{messages}\r\n',

              f'--{boundary}\r\n'
              f'Content-Disposition: form-data; name="private"\r\n\r\n{str(private).lower()}\r\n'

webscout/Provider/ChatGPTClone.py ADDED
@@ -0,0 +1,226 @@
+ import time
+ import uuid
+ import cloudscraper
+ import json
+ import re
+ from typing import Any, Dict, Optional, Generator, Union
+ from dataclasses import dataclass, asdict
+ from datetime import date
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import WEBS, exceptions
+ from webscout.litagent import LitAgent
+
+ class ChatGPTClone(Provider):
+     """
+     ChatGPTClone is a provider class for interacting with the ChatGPT Clone API.
+     Supports streaming responses.
+     """
+
+     url = "https://chatgpt-clone-ten-nu.vercel.app"
+     AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4",
+         temperature: float = 0.6,
+         top_p: float = 0.7,
+         browser: str = "chrome",
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """Initialize the ChatGPT Clone client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.model = model
+         self.session = cloudscraper.create_scraper()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.temperature = temperature
+         self.top_p = top_p
+         self.system_prompt = system_prompt
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "DNT": "1",
+             "Origin": self.url,
+             "Referer": f"{self.url}/",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         # Create session cookies with unique identifiers
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method))
+             and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+         # Set consistent headers for the scraper session
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+     def refresh_identity(self, browser: str = None):
+         """Refreshes the browser identity fingerprint."""
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         # Generate new cookies
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Send a message to the ChatGPT Clone API"""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "model": self.model
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(f"{self.url}/api/chat", headers=self.headers, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout) as response:
+                     if not response.ok:
+                         # If we get a non-200 response, try refreshing our identity once
+                         if response.status_code in [403, 429]:
+                             self.refresh_identity()
+                             # Retry with new identity
+                             with self.session.post(f"{self.url}/api/chat", headers=self.headers, cookies=self.cookies, json=payload, stream=True, timeout=self.timeout) as retry_response:
+                                 if not retry_response.ok:
+                                     raise exceptions.FailedToGenerateResponseError(
+                                         f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                                     )
+                                 response = retry_response
+                         else:
+                             raise exceptions.FailedToGenerateResponseError(
+                                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                             )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             match = re.search(r'0:"(.*?)"', line)
+                             if match:
+                                 content = match.group(1)
+                                 streaming_text += content
+                                 yield content if raw else dict(text=content)
+
+                     self.last_response.update(dict(text=streaming_text))
+                     self.conversation.update_chat_history(prompt, streaming_text)
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate a response to a prompt"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, False, optimizer=optimizer, conversationally=conversationally
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Extract message text from response"""
+         assert isinstance(response, dict)
+         formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+         return formatted_text
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = ChatGPTClone(timeout=5000)
+     response = ai.chat("write a poem about AI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
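ChatGPTClone reads the streamed response body line by line and pulls text out of segments of the form 0:"...", which is how the upstream endpoint appears to frame its chunks. A self-contained sketch of that extraction, using made-up sample lines instead of a live request:

    # Illustrative only - reproduces the 0:"..." extraction used in ChatGPTClone.ask().
    import re

    sample_lines = ['0:"Hello"', '0:" world"', 'e:{"finishReason":"stop"}']  # hypothetical stream lines
    streaming_text = ""
    for line in sample_lines:
        match = re.search(r'0:"(.*?)"', line)
        if match:
            streaming_text += match.group(1)
    print(streaming_text)  # -> Hello world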

webscout/Provider/Glider.py CHANGED
@@ -124,10 +124,14 @@ class GliderAI(Provider):
      if value.startswith("data: "):
          try:
              data = json.loads(value[6:])
-             content = data['choices'][0].get('delta', {}).get("content", "")
-             if content:
-                 streaming_text += content
-                 yield content if raw else {"text": content}
+             # Handle both standard and DeepSeek response formats
+             if "choices" in data and len(data["choices"]) > 0:
+                 choice = data["choices"][0]
+                 if "delta" in choice and "content" in choice["delta"]:
+                     content = choice["delta"]["content"]
+                     if content:
+                         streaming_text += content
+                         yield content if raw else {"text": content}
          except json.JSONDecodeError:
              if "stop" in value:
                  break
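The Glider change swaps a direct chained lookup for explicit key checks, so a chunk that lacks choices, delta, or content (as some DeepSeek-style responses apparently do) is skipped instead of raising. A standalone sketch of the hardened extraction, run against one well-formed and one partial sample chunk:

    # Illustrative only - the same defensive delta parsing added to GliderAI, on sample lines.
    import json

    sample_values = [
        'data: {"choices":[{"delta":{"content":"Hi"}}]}',  # standard delta chunk
        'data: {"choices":[{"delta":{}}]}',                # partial chunk: no content key
    ]
    for value in sample_values:
        if value.startswith("data: "):
            try:
                data = json.loads(value[6:])
                if "choices" in data and len(data["choices"]) > 0:
                    choice = data["choices"][0]
                    if "delta" in choice and "content" in choice["delta"]:
                        print(choice["delta"]["content"])  # prints only "Hi"
            except json.JSONDecodeError:
                pass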