webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/GPTWeb.py CHANGED
@@ -1,6 +1,9 @@
-import requests
+from typing import Generator, Union
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 
+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
@@ -14,7 +17,7 @@ class GPTWeb(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -22,7 +25,7 @@ class GPTWeb(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
+        # Note: system_prompt is not used by this API
     ):
         """
         Initializes the Nexra GPTWeb API with given parameters.
@@ -37,9 +40,9 @@ class GPTWeb(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for GPTWeb. Defaults to "You are a helpful AI assistant.".
         """
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = 'https://nexra.aryahcr.cc/api/chat/gptweb'
@@ -48,6 +51,7 @@ class GPTWeb(Provider):
         self.last_response = {}
         self.headers = {
             "Content-Type": "application/json"
+            # Remove User-Agent, Accept-Encoding, etc. - handled by impersonate
         }
 
         self.__available_optimizers = (
@@ -55,7 +59,10 @@ class GPTWeb(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -67,16 +74,15 @@ class GPTWeb(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
 
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
+    ) -> Union[dict, Generator[dict, None, None]]: # Corrected return type hint
         """Chat with GPTWeb
 
         Args:
@@ -110,31 +116,66 @@ class GPTWeb(Provider):
         }
 
         def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=json.dumps(data), stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            full_response = '' # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    data=json.dumps(data),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
+                response.raise_for_status() # Check for HTTP errors
 
-            full_response = ''
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    line = line.lstrip('_') # Remove "_"
-                    try:
-                        # Attempt to parse the entire line as JSON
-                        json_data = json.loads(line)
-                        full_response = json_data.get("gpt", "")
-                        yield full_response if raw else dict(text=full_response)
-                    except json.JSONDecodeError:
-                        print(f"Skipping invalid JSON line: {line}")
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8').lstrip('_') # Remove "_"
+                            # Attempt to parse the entire line as JSON
+                            json_data = json.loads(line)
+                            content = json_data.get("gpt", "")
+                            if content: # Ensure content is not None or empty
+                                full_response = content # API seems to send the full response each time
+                                resp = dict(text=full_response)
+                                # Yield dict or raw string chunk (yielding full response each time)
+                                yield resp if not raw else full_response
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            # print(f"Skipping invalid JSON line: {line}") # Optional: for debugging
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes (using the final full response)
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+
+
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            # Aggregate the stream using the updated for_stream logic
+            # Since the stream yields the full response each time, we just need the last one.
+            last_chunk = None
+            try:
+                for chunk in for_stream():
+                    last_chunk = chunk
+            except Exception as e:
+                # If aggregation fails, re-raise.
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            if last_chunk is None:
+                raise exceptions.FailedToGenerateResponseError("No response received from stream.")
+
+            return last_chunk # last_chunk is already dict or raw string based on 'raw'
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -144,7 +185,7 @@ class GPTWeb(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -155,23 +196,31 @@
             str: Response generated
         """
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
+            # Since the API sends the full response each time, we only need the last one.
+            # However, to maintain the streaming interface, we yield the message from each chunk.
+            # This might result in repeated text if the client doesn't handle it.
+            # A better approach might be to track changes, but for simplicity, yield each message.
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -186,6 +235,7 @@
         return response["text"]
 
 if __name__ == '__main__':
+    # Ensure curl_cffi is installed
     from rich import print
     ai = GPTWeb()
     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
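The pattern above repeats across most provider files in this release: requests.Session is replaced by curl_cffi's Session so each call presents a browser-like TLS fingerprint via impersonate="chrome110", transport errors surface as CurlError, and failures are wrapped in webscout.exceptions. A minimal standalone sketch of the streaming half of that pattern follows; it mirrors the diff, except that the request body is an assumption, since the hunk above does not show how the data dict is constructed.

import json
from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"Content-Type": "application/json"})

def stream_gptweb(prompt: str, timeout: int = 30):
    """Yield the accumulated reply from the Nexra GPTWeb endpoint."""
    payload = {"prompt": prompt}  # hypothetical body; the real dict is built outside the hunk shown
    try:
        response = session.post(
            "https://nexra.aryahcr.cc/api/chat/gptweb",
            data=json.dumps(payload),
            stream=True,
            timeout=timeout,
            impersonate="chrome110",  # browser-like TLS/HTTP fingerprint
        )
        response.raise_for_status()
        for line_bytes in response.iter_lines():  # curl_cffi yields raw bytes
            if not line_bytes:
                continue
            try:
                data = json.loads(line_bytes.decode("utf-8").lstrip("_"))
            except (json.JSONDecodeError, UnicodeDecodeError):
                continue  # skip underscore padding and partial lines, as the provider does
            if data.get("gpt"):
                yield data["gpt"]  # the endpoint re-sends the full reply, not a delta
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}") from e

Because each line carries the whole reply so far, a caller that wants clean incremental output should print only the suffix it has not yet seen; the provider's chat() deliberately keeps the simpler yield-everything behaviour noted in its own comments.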
webscout/Provider/Groq.py CHANGED
@@ -1,12 +1,15 @@
 from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
 
 import httpx
-import requests
 import json
 
+# Import curl_cffi for improved request handling
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
@@ -15,6 +18,7 @@ class GROQ(Provider):
     A class to interact with the GROQ AI API.
     """
 
+    # Default models list (will be updated dynamically)
     AVAILABLE_MODELS = [
         "distil-whisper-large-v3-en",
         "gemma2-9b-it",
@@ -42,6 +46,45 @@ class GROQ(Provider):
         "llama-3.2-90b-vision-preview",
         "mixtral-8x7b-32768"
     ]
+
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Groq API.
+
+        Args:
+            api_key (str, optional): Groq API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.groq.com/openai/v1/models",
+                headers=headers,
+                impersonate="chrome110" # Use impersonate for fetching
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except (CurlError, Exception):
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
 
     def __init__(
         self,
@@ -82,10 +125,15 @@ class GROQ(Provider):
         act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
         """
+        # Update available models from API
+        self.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_key = api_key
@@ -110,7 +158,11 @@ class GROQ(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
+        # Update curl_cffi session headers
         self.session.headers.update(self.headers)
+
+        # Set up conversation
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -122,7 +174,20 @@ class GROQ(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+
+        # Set proxies for curl_cffi session
         self.session.proxies = proxies
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Groq API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
 
     def add_function(self, function_name: str, function: Callable):
         """Add a function to the available functions dictionary.
@@ -183,32 +248,42 @@ class GROQ(Provider):
         }
 
         def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            try:
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use impersonate for better compatibility
                 )
+                if not response.status_code == 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        # Removed response.reason_phrase
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
 
-            message_load = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                delimiter="" if raw else "data:",
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    resp = json.loads(value)
-                    incomplete_message = self.get_message(resp)
-                    if incomplete_message:
-                        message_load += incomplete_message
-                        resp["choices"][0]["delta"]["content"] = message_load
-                        self.last_response.update(resp)
-                        yield value if raw else resp
-                    elif raw:
-                        yield value
-                except json.decoder.JSONDecodeError:
-                    pass
+                message_load = ""
+                for value in response.iter_lines(
+                    decode_unicode=True,
+                    delimiter="" if raw else "data:",
+                    chunk_size=self.stream_chunk_size,
+                ):
+                    try:
+                        resp = json.loads(value)
+                        incomplete_message = self.get_message(resp)
+                        if incomplete_message:
+                            message_load += incomplete_message
+                            resp["choices"][0]["delta"]["content"] = message_load
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                        elif raw:
+                            yield value
+                    except json.decoder.JSONDecodeError:
+                        pass
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
             # Handle tool calls if any
             if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
@@ -226,32 +301,54 @@ class GROQ(Provider):
                    })
                payload['messages'] = messages
                # Make a second call to get the final response
-               second_response = self.session.post(
-                   self.chat_endpoint, json=payload, timeout=self.timeout
-               )
-               if second_response.ok:
-                   self.last_response = second_response.json()
-               else:
-                   raise exceptions.FailedToGenerateResponseError(
-                       f"Failed to execute tool - {second_response.text}"
+               try:
+                   second_response = self.session.post(
+                       self.chat_endpoint,
+                       json=payload,
+                       timeout=self.timeout,
+                       impersonate="chrome110" # Use impersonate for better compatibility
                    )
+                   if second_response.status_code == 200:
+                       self.last_response = second_response.json()
+                   else:
+                       raise exceptions.FailedToGenerateResponseError(
+                           f"Failed to execute tool - {second_response.text}"
+                       )
+               except CurlError as e:
+                   raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
+               except Exception as e:
+                   raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
 
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
         def for_non_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type", "") == "application/json"
-            ):
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            try:
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=False,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use impersonate for better compatibility
                 )
-            resp = response.json()
+                if (
+                    not response.status_code == 200
+                ):
+                    raise exceptions.FailedToGenerateResponseError(
+                        # Removed response.reason_phrase
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
+                resp = response.json()
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+            except Exception as e:
+                # Catch the original AttributeError here if it happens before the raise
+                if isinstance(e, AttributeError) and 'reason_phrase' in str(e):
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}) - {response.text}"
+                    )
+                raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
 
             # Handle tool calls if any
             if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
@@ -269,15 +366,23 @@ class GROQ(Provider):
                    })
                payload['messages'] = messages
                # Make a second call to get the final response
-               second_response = self.session.post(
-                   self.chat_endpoint, json=payload, timeout=self.timeout
-               )
-               if second_response.ok:
-                   resp = second_response.json()
-               else:
-                   raise exceptions.FailedToGenerateResponseError(
-                       f"Failed to execute tool - {second_response.text}"
+               try:
+                   second_response = self.session.post(
+                       self.chat_endpoint,
+                       json=payload,
+                       timeout=self.timeout,
+                       impersonate="chrome110" # Use impersonate for better compatibility
                    )
+                   if second_response.status_code == 200:
+                       resp = second_response.json()
+                   else:
+                       raise exceptions.FailedToGenerateResponseError(
+                           f"Failed to execute tool - {second_response.text}"
+                       )
+               except CurlError as e:
+                   raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
+               except Exception as e:
+                   raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
 
             self.last_response.update(resp)
             self.conversation.update_chat_history(
@@ -287,7 +392,6 @@ class GROQ(Provider):
 
         return for_stream() if stream else for_non_stream()
 
-
     def chat(
         self,
         prompt: str,
@@ -337,11 +441,16 @@ class GROQ(Provider):
         """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        try:
-           if response["choices"][0].get("delta"):
+           # Check delta first for streaming
+           if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
                return response["choices"][0]["delta"]["content"]
-           return response["choices"][0]["message"]["content"]
-       except KeyError:
-           return ""
+           # Check message content for non-streaming or final message
+           if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
+               return response["choices"][0]["message"]["content"]
+       except (KeyError, IndexError, TypeError):
+           # Handle cases where the structure might be different or content is null/missing
+           pass
+       return "" # Return empty string if no content found
 
 
 class AsyncGROQ(AsyncProvider):
@@ -349,33 +458,8 @@ class AsyncGROQ(AsyncProvider):
     An asynchronous class to interact with the GROQ AI API.
     """
 
-    AVAILABLE_MODELS = [
-        "distil-whisper-large-v3-en",
-        "gemma2-9b-it",
-        "llama-3.3-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "whisper-large-v3",
-        "whisper-large-v3-turbo",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-        "meta-llama/llama-4-maverick-17b-128e-instruct",
-        "playai-tts",
-        "playai-tts-arabic",
-        "qwen-qwq-32b",
-        "mistral-saba-24b",
-        "qwen-2.5-coder-32b",
-        "qwen-2.5-32b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-llama-70b",
-        "llama-3.3-70b-specdec",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-11b-vision-preview",
-        "llama-3.2-90b-vision-preview",
-        "mixtral-8x7b-32768"
-    ]
+    # Use the same model list as the synchronous class
+    AVAILABLE_MODELS = GROQ.AVAILABLE_MODELS
 
     def __init__(
         self,
@@ -416,6 +500,10 @@ class AsyncGROQ(AsyncProvider):
         act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
         """
+        # Update available models from API
+        GROQ.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -518,7 +606,8 @@ class AsyncGROQ(AsyncProvider):
         ) as response:
             if not response.is_success:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
+                    # Removed response.reason_phrase (not available in httpx response)
+                    f"Failed to generate response - ({response.status_code})"
                 )
 
             message_load = ""
@@ -575,7 +664,8 @@ class AsyncGROQ(AsyncProvider):
             )
             if not response.is_success:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
+                    # Removed response.reason_phrase (not available in httpx response)
+                    f"Failed to generate response - ({response.status_code})"
                 )
             resp = response.json()
 
@@ -663,8 +753,21 @@ class AsyncGROQ(AsyncProvider):
         """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        try:
-           if response["choices"][0].get("delta"):
+           # Check delta first for streaming
+           if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
                return response["choices"][0]["delta"]["content"]
-           return response["choices"][0]["message"]["content"]
-       except KeyError:
-           return ""
+           # Check message content for non-streaming or final message
+           if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
+               return response["choices"][0]["message"]["content"]
+       except (KeyError, IndexError, TypeError):
+           # Handle cases where the structure might be different or content is null/missing
+           pass
+       return "" # Return empty string if no content found
+
+if __name__ == "__main__":
+    # Example usage
+    api_key = "gsk_*******************************"
+    groq = GROQ(api_key=api_key, model="compound-beta")
+    prompt = "What is the capital of France?"
+    response = groq.chat(prompt)
+    print(response)
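The Groq provider gets the same requests-to-curl_cffi migration plus one structural addition: the model list is now fetched from the API at construction time, falling back to the hard-coded list on any failure. A condensed sketch of that discovery logic is below; the endpoint and fallback behaviour come from the diff, while the short DEFAULT_MODELS subset here is a placeholder rather than the full list shipped in the package.

from curl_cffi.requests import Session
from curl_cffi import CurlError

DEFAULT_MODELS = ["llama-3.3-70b-versatile", "llama-3.1-8b-instant"]  # placeholder subset

def get_groq_models(api_key: str = None) -> list:
    """Return live model IDs from Groq, or the defaults on any failure."""
    if not api_key:
        return DEFAULT_MODELS
    try:
        response = Session().get(
            "https://api.groq.com/openai/v1/models",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            },
            impersonate="chrome110",
        )
        if response.status_code != 200:
            return DEFAULT_MODELS
        data = response.json()
        if isinstance(data.get("data"), list):
            return [model["id"] for model in data["data"]]
    except CurlError:
        pass  # network failure: fall back to the static list, as the diff does
    return DEFAULT_MODELS

if __name__ == "__main__":
    print(get_groq_models())  # no key supplied, so this prints the defaults

One subtlety worth noting in the released code: update_available_models rebinds GROQ.AVAILABLE_MODELS to a new list, but AsyncGROQ's AVAILABLE_MODELS is bound once at class-definition time, so the async class appears to keep validating against the original static list even after a refresh.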