webscout-6.9-py3-none-any.whl → webscout-7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (51)
  1. webscout/AIbase.py +12 -2
  2. webscout/DWEBS.py +38 -22
  3. webscout/Extra/autocoder/autocoder_utiles.py +68 -7
  4. webscout/Extra/autollama.py +0 -16
  5. webscout/Extra/gguf.py +0 -13
  6. webscout/LLM.py +1 -1
  7. webscout/Provider/AISEARCH/DeepFind.py +251 -0
  8. webscout/Provider/AISEARCH/__init__.py +2 -2
  9. webscout/Provider/AISEARCH/felo_search.py +167 -118
  10. webscout/Provider/Blackboxai.py +136 -137
  11. webscout/Provider/Cloudflare.py +92 -78
  12. webscout/Provider/Deepinfra.py +59 -35
  13. webscout/Provider/Glider.py +222 -0
  14. webscout/Provider/Groq.py +26 -18
  15. webscout/Provider/HF_space/__init__.py +0 -0
  16. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  17. webscout/Provider/Jadve.py +108 -77
  18. webscout/Provider/Llama3.py +117 -94
  19. webscout/Provider/Marcus.py +65 -10
  20. webscout/Provider/Netwrck.py +61 -49
  21. webscout/Provider/PI.py +77 -122
  22. webscout/Provider/PizzaGPT.py +129 -82
  23. webscout/Provider/TextPollinationsAI.py +229 -0
  24. webscout/Provider/Youchat.py +28 -22
  25. webscout/Provider/__init__.py +12 -4
  26. webscout/Provider/askmyai.py +2 -2
  27. webscout/Provider/chatglm.py +205 -0
  28. webscout/Provider/dgaf.py +215 -0
  29. webscout/Provider/gaurish.py +106 -66
  30. webscout/Provider/hermes.py +219 -0
  31. webscout/Provider/llamatutor.py +72 -62
  32. webscout/Provider/llmchat.py +62 -35
  33. webscout/Provider/meta.py +6 -6
  34. webscout/Provider/multichat.py +205 -104
  35. webscout/Provider/typegpt.py +26 -23
  36. webscout/Provider/yep.py +3 -3
  37. webscout/litagent/__init__.py +3 -146
  38. webscout/litagent/agent.py +120 -0
  39. webscout/litagent/constants.py +31 -0
  40. webscout/tempid.py +0 -4
  41. webscout/version.py +1 -1
  42. webscout/webscout_search.py +1141 -1140
  43. webscout/webscout_search_async.py +635 -635
  44. {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
  45. {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
  46. {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
  47. webscout/Provider/AISEARCH/ooai.py +0 -155
  48. webscout/Provider/RUBIKSAI.py +0 -272
  49. {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
  50. {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
  51. {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/Jadve.py
+++ b/webscout/Provider/Jadve.py
@@ -1,18 +1,19 @@
+
 import requests
 import json
 import re
 from typing import Any, Dict, Optional, Generator
 
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 
 class JadveOpenAI(Provider):
     """
-    A class to interact with the OpenAI API through jadve.com.
+    A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
+    Includes optional logging capabilities.
     """
 
     AVAILABLE_MODELS = ["gpt-4o", "gpt-4o-mini"]
@@ -29,36 +30,50 @@ class JadveOpenAI(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "gpt-4o-mini",
-        system_prompt: str = "You are a helpful AI assistant."
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
         """
-        Initializes the OpenAI API client through jadve.com with given parameters.
+        Initializes the JadveOpenAI client with optional logging support.
 
         Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for OpenAI. Defaults to "You are a helpful AI assistant.".
-            model (str, optional): AI model to use for text generation. Defaults to "gpt-4o".
+            is_conversation (bool, optional): Enable conversational mode. Defaults to True.
+            max_tokens (int, optional): Maximum tokens for generation. Defaults to 600.
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+            intro (str, optional): Introductory prompt text. Defaults to None.
+            filepath (str, optional): Path to conversation history file. Defaults to None.
+            update_file (bool, optional): Whether to update the conversation history file. Defaults to True.
+            proxies (dict, optional): Proxies for HTTP requests. Defaults to {}.
+            history_offset (int, optional): Limit for conversation history. Defaults to 10250.
+            act (str|int, optional): Act key for AwesomePrompts. Defaults to None.
+            model (str, optional): AI model to be used. Defaults to "gpt-4o-mini".
+            system_prompt (str, optional): System prompt text. Defaults to "You are a helpful AI assistant."
+            logging (bool, optional): Enable logging functionality. Defaults to False.
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
+        self.logger = LitLogger(
+            name="JadveOpenAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing JadveOpenAI with model: {model}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://openai.jadve.com/chatgpt"
+        # Streaming endpoint for jadve.com
+        self.api_endpoint = "https://openai.jadve.com/stream"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
+        # Updated headers with required x-authorization header.
         self.headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
@@ -75,14 +90,16 @@ class JadveOpenAI(Provider):
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
             "user-agent": LitAgent().random(),
+            "x-authorization": "Bearer"
         }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
 
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(self.headers)
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -90,11 +107,14 @@ class JadveOpenAI(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+        if self.logger:
+            self.logger.info("JadveOpenAI initialized successfully.")
 
     def ask(
         self,
@@ -103,78 +123,87 @@ class JadveOpenAI(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
+    ) -> dict | Generator[dict, None, None]:
+        """
+        Chat with AI.
+
         Args:
             prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            raw (bool, optional): Return raw content chunks. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
         Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
+            dict or generator: A dictionary with the generated text or a generator yielding text chunks.
         """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
+                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                 )
 
         payload = {
-            "action": "sendmessage",
-            "model": self.model,
             "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
+                {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
             ],
+            "model": self.model,
+            "botId": "",
+            "chatId": "",
+            "stream": stream,
             "temperature": 0.7,
-            "language": "en",
             "returnTokensUsage": True,
-            "botId": "guest-chat",
-            "chatId": ""
+            "useTools": False
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
             response = self.session.post(
                 self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
             )
 
             if not response.ok:
+                if self.logger:
+                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
+
+            if self.logger:
+                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
+            # Read the entire response text.
+            response_text = response.text
+            pattern = r'0:"(.*?)"'
+            chunks = re.findall(pattern, response_text)
             streaming_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    if line.startswith("data: "):
-                        data = line[6:]
-                        if data == "[DONE]":
-                            break
-                        try:
-                            json_data = json.loads(data)
-                            if "choices" in json_data and len(json_data["choices"]) > 0:
-                                content = json_data["choices"][0].get("delta", {}).get("content", "")
-                                if content:
-                                    streaming_text += content
-                                    yield content if raw else dict(text=content)
-                        except json.JSONDecodeError as e:
-                            print(f"Error parsing line: {line} - {e}")
+            for content in chunks:
+                streaming_text += content
+
+                yield content if raw else dict(text=content)
+
             self.last_response.update(dict(text=streaming_text))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+            if self.logger:
+                self.logger.debug("Response processing completed.")
 
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
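Note the shape of the rewritten `for_stream()` above: instead of iterating SSE lines, it reads the whole response body and regex-extracts `0:"..."` text parts, which looks like a Vercel-AI-SDK-style data stream. A standalone sketch of that extraction over a hypothetical response body:

```python
import re

# Hypothetical body in the 0:"..." chunk format the new code expects; the
# real payload comes from openai.jadve.com/stream.
response_text = '0:"Hello"\n0:" there"\ne:{"finishReason":"stop"}\n0:"!"'

# Same pattern as the diff: non-greedy match inside 0:"..." parts, which
# skips non-text frames such as the finish event above.
chunks = re.findall(r'0:"(.*?)"', response_text)

print(chunks)           # ['Hello', ' there', '!']
print("".join(chunks))  # Hello there!
```

One consequence worth noting: because `response.text` waits for the complete body, this implementation appears to yield chunks only after the full download finishes, so `stream=True` preserves the chunked interface but not incremental delivery.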
@@ -187,48 +216,50 @@ class JadveOpenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
+    ) -> str | Generator[str, None, None]:
+        """
+        Generate a chat response (string).
+
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
         Returns:
-            str: Response generated
+            str or generator: Generated response string or generator yielding response chunks.
         """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
             ):
                 yield self.get_message(response)
 
         def for_non_stream():
             return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+                self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
             )
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """
+        Retrieves message from the response.
+
         Args:
-            response (dict): Response generated by `self.ask`
+            response (dict): Response from the ask() method.
         Returns:
-            str: Message extracted
+            str: Extracted text.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
     from rich import print
-    ai = JadveOpenAI(timeout=5000)
+    ai = JadveOpenAI(timeout=5000, logging=False)
+    # For streaming response demonstration.
     response = ai.chat("yo what's up", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
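Also worth isolating from the Jadve diff: the request body now wraps user text in typed content parts (`{"type": "text", "text": ...}`) and no longer sends the system prompt in the messages array. A sketch of the new payload shape (the helper function is our own illustration; the field names and values mirror the diff):

```python
def build_jadve_payload(prompt: str, model: str = "gpt-4o-mini", stream: bool = True) -> dict:
    # Mirrors the payload assembled in the new ask(); field values here are
    # illustrative defaults, not authoritative API documentation.
    return {
        "messages": [
            {"role": "user", "content": [{"type": "text", "text": prompt}]}
        ],
        "model": model,
        "botId": "",
        "chatId": "",
        "stream": stream,
        "temperature": 0.7,
        "returnTokensUsage": True,
        "useTools": False,
    }

print(build_jadve_payload("Hello"))
```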
--- a/webscout/Provider/Llama3.py
+++ b/webscout/Provider/Llama3.py
@@ -1,20 +1,30 @@
-import os
-import openai
 import requests
+import json
+from typing import Any, Dict, Generator
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
+from webscout import exceptions
 
-class LLAMA3(Provider):
+class Sambanova(Provider):
     """
-    A class to interact with the Sambanova API using the openai library.
+    A class to interact with the Sambanova API.
     """
 
     AVAILABLE_MODELS = [
         "Meta-Llama-3.1-8B-Instruct",
         "Meta-Llama-3.1-70B-Instruct",
-        "Meta-Llama-3.1-405B-Instruct"
+        "Meta-Llama-3.1-405B-Instruct",
+        "DeepSeek-R1-Distill-Llama-70B",
+        "Llama-3.1-Tulu-3-405B",
+        "Meta-Llama-3.2-1B-Instruct",
+        "Meta-Llama-3.2-3B-Instruct",
+        "Meta-Llama-3.3-70B-Instruct",
+        "Qwen2.5-72B-Instruct",
+        "Qwen2.5-Coder-32B-Instruct",
+        "QwQ-32B-Preview"
     ]
 
     def __init__(
@@ -22,9 +32,6 @@ class LLAMA3(Provider):
         api_key: str = None,
         is_conversation: bool = True,
         max_tokens: int = 600,
-        temperature: float = 1,
-        top_p: float = 0.95,
-        model: str = "Meta-Llama-3.1-8B-Instruct",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -32,42 +39,25 @@ class LLAMA3(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        model: str = "Meta-Llama-3.1-8B-Instruct",
         system_prompt: str = "You are a helpful AI assistant.",
     ):
         """
-        Initializes the Sambanova API with the given parameters.
-
-        Args:
-            api_key (str, optional): Your Sambanova API key. If None, it will use the environment variable "SAMBANOVA_API_KEY". Defaults to None.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): The temperature parameter for the model. Defaults to 1.
-            top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
-            model (str, optional): The name of the Sambanova model to use. Defaults to "Meta-Llama-3.1-8B-Instruct".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System instruction to guide the AI's behavior.
-                Defaults to "You are a helpful and informative AI assistant.".
+        Initializes the Sambanova API with given parameters.
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.api_key = api_key or os.environ["SAMBANOVA_API_KEY"]
+        self.api_key = api_key
         self.model = model
-        self.temperature = temperature
-        self.top_p = top_p
-        self.system_prompt = system_prompt  # Add this line to set the system_prompt attribute
+        self.system_prompt = system_prompt
 
-        self.session = requests.Session()  # Not directly used for Gemini API calls, but can be used for other requests
+        self.session = requests.Session()
+        self.session.proxies = proxies
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
-        self.last_response = {}
+        self.last_response = ""
 
         self.__available_optimizers = (
             method
@@ -85,13 +75,13 @@ class LLAMA3(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
 
-        # Configure the Sambanova API
-        self.client = openai.OpenAI(
-            api_key=self.api_key,
-            base_url="https://api.sambanova.ai/v1",
-        )
+        # Configure the API base URL and headers
+        self.base_url = "https://api.sambanova.ai/v1/chat/completions"
+        self.headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json"
+        }
 
     def ask(
         self,
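With the `openai.OpenAI` client gone, the provider now speaks to the chat-completions endpoint with plain `requests`. A minimal non-streaming call in the same shape (hypothetical key and prompt; the URL, headers, and payload fields match the diff):

```python
import requests

API_KEY = "YOUR_SAMBANOVA_API_KEY"  # hypothetical placeholder

resp = requests.post(
    "https://api.sambanova.ai/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    },
    json={
        "model": "Meta-Llama-3.1-8B-Instruct",
        "stream": False,
        "messages": [
            {"role": "system", "content": "You are a helpful AI assistant."},
            {"role": "user", "content": "Say hello."},
        ],
        "max_tokens": 64,
    },
    timeout=30,
)
resp.raise_for_status()
# Assumes an OpenAI-compatible non-streaming response shape, as the removed
# openai-client code (response.choices[0].message.content) implied.
print(resp.json()["choices"][0]["message"]["content"])
```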
@@ -100,23 +90,8 @@ class LLAMA3(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Not used for Sambanova API. Defaults to False.
-            raw (bool, optional): Not used for Sambanova API. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
+    ) -> Any | Generator[Any, None, None]:
+        """Chat with AI using the Sambanova API."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -125,65 +100,113 @@ class LLAMA3(Provider):
                 )
             else:
                 raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
+                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                 )
 
-        response = self.client.chat.completions.create(
-            model=self.model,
-            messages=[
+        payload = {
+            "model": self.model,
+            "stream": stream,
+            "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            temperature=self.temperature,
-            top_p=self.top_p
-        )
+            "max_tokens": self.max_tokens_to_sample,
+        }
 
-        self.last_response.update(dict(text=response.choices[0].message.content))
-        self.conversation.update_chat_history(
-            prompt, self.get_message(self.last_response)
-        )
-        return self.last_response
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self.session.post(
+                    self.base_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed: {response.status_code} - {response.text}"
+                    )
+
+                for line in response.iter_lines():
+                    if line:
+                        # Remove the "data:" prefix and extra whitespace if present
+                        line_str = line.decode('utf-8').strip() if isinstance(line, bytes) else line.strip()
+                        if line_str.startswith("data:"):
+                            data = line_str[5:].strip()
+                        else:
+                            data = line_str
+                        if data == "[DONE]":
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            # Skip entries without valid choices
+                            if not json_data.get("choices"):
+                                continue
+                            choice = json_data["choices"][0]
+                            delta = choice.get("delta", {})
+                            if "content" in delta:
+                                content = delta["content"]
+                                streaming_text += content
+                                # Yield content directly as a string for consistency
+                                yield content
+                            # If finish_reason is provided, consider the stream complete
+                            if choice.get("finish_reason"):
+                                break
+                        except json.JSONDecodeError:
+                            continue
+                self.last_response = streaming_text
+                self.conversation.update_chat_history(
+                    prompt, self.last_response
+                )
+            except requests.exceptions.RequestException as e:
+                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
 
     def chat(
         self,
         prompt: str,
-        stream: bool = False,  # Streaming not supported by the current google-generativeai library
+        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-
+    ) -> Any | Generator[str, None, None]:
+        """Generate response as a string.
         Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Not used for Sambanova API. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversational tuning with the optimizer. Defaults to False.
         Returns:
-            str: Response generated
+            str: Generated response, or a generator of strings if streaming.
         """
-        return self.get_message(
-            self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        if stream:
+            # For stream mode, yield the text chunks directly
+            return self.ask(prompt, stream=True, optimizer=optimizer, conversationally=conversationally)
+        else:
+            # For non-stream mode, return the complete text response
+            return self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
+
+    def get_message(self, response: Any) -> str:
+        """
+        Retrieves a clean message from the provided response.
 
         Args:
-            response (dict): Response generated by `self.ask`
+            response: The raw response data.
 
         Returns:
-            str: Message extracted
+            str: The extracted message.
         """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        if isinstance(response, str):
+            return response
+        elif isinstance(response, dict) and "text" in response:
+            return response["text"]
+        return ""
 
 if __name__ == "__main__":
     from rich import print
-    ai = LLAMA3(api_key='')
-    response = ai.chat(input(">>> "))
-    for chunks in response:
-        print(chunks, end="", flush=True)
+    ai = Sambanova(api_key='')
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
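The streaming half of the new `ask()` is ordinary OpenAI-style SSE handling: strip the `data:` prefix, stop on `[DONE]` or a `finish_reason`, and accumulate `delta.content`. A self-contained sketch of that parsing logic over a canned stream (no live endpoint involved):

```python
import json

def iter_sse_content(lines):
    """Yield text deltas from OpenAI-style SSE lines (mirrors the parsing
    added to Sambanova.ask(); input here is canned, not a live response)."""
    for raw in lines:
        line = raw.strip()
        data = line[5:].strip() if line.startswith("data:") else line
        if data == "[DONE]":
            break
        try:
            event = json.loads(data)
        except json.JSONDecodeError:
            continue  # skip keep-alives and other non-JSON lines
        choices = event.get("choices")
        if not choices:
            continue
        delta = choices[0].get("delta", {})
        if "content" in delta:
            yield delta["content"]
        if choices[0].get("finish_reason"):
            break

canned = [
    'data: {"choices":[{"delta":{"content":"Hel"}}]}',
    'data: {"choices":[{"delta":{"content":"lo!"},"finish_reason":"stop"}]}',
    'data: [DONE]',
]
print("".join(iter_sse_content(canned)))  # -> Hello!
```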