webscout 4.7-py3-none-any.whl → 4.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (43)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Local/_version.py +1 -1
  4. webscout/Provider/Andi.py +7 -1
  5. webscout/Provider/BasedGPT.py +11 -5
  6. webscout/Provider/Berlin4h.py +11 -5
  7. webscout/Provider/Blackboxai.py +10 -4
  8. webscout/Provider/Cohere.py +11 -5
  9. webscout/Provider/DARKAI.py +25 -7
  10. webscout/Provider/Deepinfra.py +2 -1
  11. webscout/Provider/Deepseek.py +25 -9
  12. webscout/Provider/DiscordRocks.py +389 -0
  13. webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
  14. webscout/Provider/Gemini.py +1 -1
  15. webscout/Provider/Groq.py +244 -110
  16. webscout/Provider/Llama.py +13 -5
  17. webscout/Provider/Llama3.py +15 -2
  18. webscout/Provider/OLLAMA.py +8 -7
  19. webscout/Provider/Perplexity.py +422 -52
  20. webscout/Provider/Phind.py +6 -5
  21. webscout/Provider/PizzaGPT.py +7 -1
  22. webscout/Provider/__init__.py +12 -31
  23. webscout/Provider/ai4chat.py +193 -0
  24. webscout/Provider/koala.py +11 -5
  25. webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
  26. webscout/Provider/meta.py +2 -1
  27. webscout/version.py +1 -1
  28. webscout/webai.py +2 -64
  29. webscout/webscout_search.py +1 -1
  30. {webscout-4.7.dist-info → webscout-4.8.dist-info}/METADATA +227 -252
  31. {webscout-4.7.dist-info → webscout-4.8.dist-info}/RECORD +35 -40
  32. webscout/Provider/FreeGemini.py +0 -169
  33. webscout/Provider/Geminiflash.py +0 -152
  34. webscout/Provider/Geminipro.py +0 -152
  35. webscout/Provider/Leo.py +0 -469
  36. webscout/Provider/OpenGPT.py +0 -867
  37. webscout/Provider/Xjai.py +0 -230
  38. webscout/Provider/Yepchat.py +0 -478
  39. webscout/Provider/Youchat.py +0 -225
  40. {webscout-4.7.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
  41. {webscout-4.7.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
  42. {webscout-4.7.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
  43. {webscout-4.7.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/Provider/Groq.py CHANGED
@@ -1,35 +1,37 @@
  import time
  import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
+ from typing import Any, AsyncGenerator, Dict, Optional, Callable, List
+
+ import httpx
  import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
  import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx

  class GROQ(Provider):
+ """
+ A class to interact with the GROQ AI API.
+ """
+
+ AVAILABLE_MODELS = [
+ "llama-3.1-405b-reasoning",
+ "llama-3.1-70b-versatile",
+ "llama-3.1-8b-instant",
+ "llama3-groq-70b-8192-tool-use-preview",
+ "llama3-groq-8b-8192-tool-use-preview",
+ "llama-guard-3-8b",
+ "llama3-70b-8192",
+ "llama3-8b-8192",
+ "mixtral-8x7b-32768",
+ "gemma-7b-it",
+ "gemma2-9b-it",
+ "whisper-large-v3"
+ ]
+
  def __init__(
  self,
  api_key: str,
@@ -47,6 +49,7 @@ class GROQ(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
+ system_prompt: Optional[str] = None,
  ):
  """Instantiates GROQ

@@ -58,7 +61,7 @@ class GROQ(Provider):
  presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
  frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
  top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+ model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
  timeout (int, optional): Http request timeout. Defaults to 30.
  intro (str, optional): Conversation introductory prompt. Defaults to None.
  filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -66,7 +69,11 @@ class GROQ(Provider):
  proxies (dict, optional): Http request proxies. Defaults to {}.
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
  """
+ if model not in self.AVAILABLE_MODELS:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
  self.session = requests.Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
@@ -76,10 +83,12 @@ class GROQ(Provider):
  self.presence_penalty = presence_penalty
  self.frequency_penalty = frequency_penalty
  self.top_p = top_p
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
+ self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
  self.stream_chunk_size = 64
  self.timeout = timeout
  self.last_response = {}
+ self.system_prompt = system_prompt
+ self.available_functions: Dict[str, Callable] = {} # Store available functions
  self.headers = {
  "Content-Type": "application/json",
  "Authorization": f"Bearer {self.api_key}",
@@ -104,6 +113,15 @@ class GROQ(Provider):
  self.conversation.history_offset = history_offset
  self.session.proxies = proxies

+ def add_function(self, function_name: str, function: Callable):
+ """Add a function to the available functions dictionary.
+
+ Args:
+ function_name (str): The name of the function to be used in the prompt.
+ function (Callable): The function itself.
+ """
+ self.available_functions[function_name] = function
+
  def ask(
  self,
  prompt: str,
@@ -111,45 +129,20 @@ class GROQ(Provider):
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ tools: Optional[List[Dict[str, Any]]] = None, # Add tools parameter
  ) -> dict:
  """Chat with AI

- Args:
- prompt (str): Prompt to be send.
- stream (bool, optional): Flag for streaming response. Defaults to False.
- raw (bool, optional): Stream back raw response as received. Defaults to False.
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
- Returns:
- dict : {}
- ```json
- {
- "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
- "object": "chat.completion",
- "created": 1710852779,
- "model": "mixtral-8x7b-32768",
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
- },
- "logprobs": null,
- "finish_reason": "stop"
- }
- ],
- "usage": {
- "prompt_tokens": 47,
- "prompt_time": 0.03,
- "completion_tokens": 37,
- "completion_time": 0.069,
- "total_tokens": 84,
- "total_time": 0.099
- },
- "system_fingerprint": null
- }
- ```
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
+
+ Returns:
+ dict : {}
  """
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
@@ -161,15 +154,21 @@ class GROQ(Provider):
  raise Exception(
  f"Optimizer is not one of {self.__available_optimizers}"
  )
+
+ messages = [{"content": conversation_prompt, "role": "user"}]
+ if self.system_prompt:
+ messages.insert(0, {"role": "system", "content": self.system_prompt})
+
  self.session.headers.update(self.headers)
  payload = {
  "frequency_penalty": self.frequency_penalty,
- "messages": [{"content": conversation_prompt, "role": "user"}],
+ "messages": messages,
  "model": self.model,
  "presence_penalty": self.presence_penalty,
  "stream": stream,
  "temperature": self.temperature,
  "top_p": self.top_p,
+ "tools": tools # Include tools in the payload
  }

  def for_stream():
@@ -177,7 +176,7 @@ class GROQ(Provider):
  self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
  )
  if not response.ok:
- raise Exception(
+ raise exceptions.FailedToGenerateResponseError(
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
  )

@@ -199,6 +198,33 @@ class GROQ(Provider):
  yield value
  except json.decoder.JSONDecodeError:
  pass
+
+ # Handle tool calls if any
+ if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
+ tool_calls = self.last_response['choices'][0]['message']['tool_calls']
+ for tool_call in tool_calls:
+ function_name = tool_call.get('function', {}).get('name')
+ arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
+ if function_name in self.available_functions:
+ tool_response = self.available_functions[function_name](**arguments)
+ messages.append({
+ "tool_call_id": tool_call['id'],
+ "role": "tool",
+ "name": function_name,
+ "content": tool_response
+ })
+ payload['messages'] = messages
+ # Make a second call to get the final response
+ second_response = self.session.post(
+ self.chat_endpoint, json=payload, timeout=self.timeout
+ )
+ if second_response.ok:
+ self.last_response = second_response.json()
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to execute tool - {second_response.text}"
+ )
+
  self.conversation.update_chat_history(
  prompt, self.get_message(self.last_response)
  )
@@ -207,11 +233,41 @@ class GROQ(Provider):
  response = self.session.post(
  self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
  )
- if not response.ok:
- raise Exception(
+ if (
+ not response.ok
+ or not response.headers.get("Content-Type", "") == "application/json"
+ ):
+ raise exceptions.FailedToGenerateResponseError(
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
  )
  resp = response.json()
+
+ # Handle tool calls if any
+ if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
+ tool_calls = resp['choices'][0]['message']['tool_calls']
+ for tool_call in tool_calls:
+ function_name = tool_call.get('function', {}).get('name')
+ arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
+ if function_name in self.available_functions:
+ tool_response = self.available_functions[function_name](**arguments)
+ messages.append({
+ "tool_call_id": tool_call['id'],
+ "role": "tool",
+ "name": function_name,
+ "content": tool_response
+ })
+ payload['messages'] = messages
+ # Make a second call to get the final response
+ second_response = self.session.post(
+ self.chat_endpoint, json=payload, timeout=self.timeout
+ )
+ if second_response.ok:
+ resp = second_response.json()
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to execute tool - {second_response.text}"
+ )
+
  self.last_response.update(resp)
  self.conversation.update_chat_history(
  prompt, self.get_message(self.last_response)
@@ -220,12 +276,14 @@ class GROQ(Provider):

  return for_stream() if stream else for_non_stream()

+
  def chat(
  self,
  prompt: str,
  stream: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> str:
  """Generate response `str`
  Args:
@@ -233,13 +291,14 @@ class GROQ(Provider):
  stream (bool, optional): Flag for streaming response. Defaults to False.
  optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
  conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
  Returns:
  str: Response generated
  """

  def for_stream():
  for response in self.ask(
- prompt, True, optimizer=optimizer, conversationally=conversationally
+ prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
  ):
  yield self.get_message(response)

@@ -250,6 +309,7 @@ class GROQ(Provider):
  False,
  optimizer=optimizer,
  conversationally=conversationally,
+ tools=tools
  )
  )

@@ -271,7 +331,28 @@ class GROQ(Provider):
  return response["choices"][0]["message"]["content"]
  except KeyError:
  return ""
+
+
  class AsyncGROQ(AsyncProvider):
+ """
+ An asynchronous class to interact with the GROQ AI API.
+ """
+
+ AVAILABLE_MODELS = [
+ "llama-3.1-405b-reasoning",
+ "llama-3.1-70b-versatile",
+ "llama-3.1-8b-instant",
+ "llama3-groq-70b-8192-tool-use-preview",
+ "llama3-groq-8b-8192-tool-use-preview",
+ "llama-guard-3-8b",
+ "llama3-70b-8192",
+ "llama3-8b-8192",
+ "mixtral-8x7b-32768",
+ "gemma-7b-it",
+ "gemma2-9b-it",
+ "whisper-large-v3"
+ ]
+
  def __init__(
  self,
  api_key: str,
@@ -289,8 +370,9 @@ class AsyncGROQ(AsyncProvider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
+ system_prompt: Optional[str] = None,
  ):
- """Instantiates GROQ
+ """Instantiates AsyncGROQ

  Args:
  api_key (key): GROQ's API key.
@@ -308,7 +390,11 @@ class AsyncGROQ(AsyncProvider):
  proxies (dict, optional): Http request proxies. Defaults to {}.
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
  """
+ if model not in self.AVAILABLE_MODELS:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_key = api_key
@@ -321,6 +407,8 @@ class AsyncGROQ(AsyncProvider):
  self.stream_chunk_size = 64
  self.timeout = timeout
  self.last_response = {}
+ self.system_prompt = system_prompt
+ self.available_functions: Dict[str, Callable] = {} # Store available functions
  self.headers = {
  "Content-Type": "application/json",
  "Authorization": f"Bearer {self.api_key}",
@@ -344,6 +432,15 @@ class AsyncGROQ(AsyncProvider):
  self.conversation.history_offset = history_offset
  self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)

+ def add_function(self, function_name: str, function: Callable):
+ """Add a function to the available functions dictionary.
+
+ Args:
+ function_name (str): The name of the function to be used in the prompt.
+ function (Callable): The function itself.
+ """
+ self.available_functions[function_name] = function
+
  async def ask(
  self,
  prompt: str,
@@ -351,45 +448,19 @@ class AsyncGROQ(AsyncProvider):
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> dict | AsyncGenerator:
  """Chat with AI asynchronously.

- Args:
- prompt (str): Prompt to be send.
- stream (bool, optional): Flag for streaming response. Defaults to False.
- raw (bool, optional): Stream back raw response as received. Defaults to False.
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
- Returns:
- dict|AsyncGenerator : ai content
- ```json
- {
- "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
- "object": "chat.completion",
- "created": 1710852779,
- "model": "mixtral-8x7b-32768",
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
- },
- "logprobs": null,
- "finish_reason": "stop"
- }
- ],
- "usage": {
- "prompt_tokens": 47,
- "prompt_time": 0.03,
- "completion_tokens": 37,
- "completion_time": 0.069,
- "total_tokens": 84,
- "total_time": 0.099
- },
- "system_fingerprint": null
- }
- ```
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
+ Returns:
+ dict|AsyncGenerator : ai content
  """
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
@@ -401,14 +472,20 @@ class AsyncGROQ(AsyncProvider):
  raise Exception(
  f"Optimizer is not one of {self.__available_optimizers}"
  )
+
+ messages = [{"content": conversation_prompt, "role": "user"}]
+ if self.system_prompt:
+ messages.insert(0, {"role": "system", "content": self.system_prompt})
+
  payload = {
  "frequency_penalty": self.frequency_penalty,
- "messages": [{"content": conversation_prompt, "role": "user"}],
+ "messages": messages,
  "model": self.model,
  "presence_penalty": self.presence_penalty,
  "stream": stream,
  "temperature": self.temperature,
  "top_p": self.top_p,
+ "tools": tools
  }

  async def for_stream():
@@ -416,7 +493,7 @@ class AsyncGROQ(AsyncProvider):
  "POST", self.chat_endpoint, json=payload, timeout=self.timeout
  ) as response:
  if not response.is_success:
- raise Exception(
+ raise exceptions.FailedToGenerateResponseError(
  f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
  )

@@ -437,19 +514,73 @@ class AsyncGROQ(AsyncProvider):
  yield value
  except json.decoder.JSONDecodeError:
  pass
+
+ # Handle tool calls if any (in streaming mode)
+ if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
+ tool_calls = self.last_response['choices'][0]['message']['tool_calls']
+ for tool_call in tool_calls:
+ function_name = tool_call.get('function', {}).get('name')
+ arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
+ if function_name in self.available_functions:
+ tool_response = self.available_functions[function_name](**arguments)
+ messages.append({
+ "tool_call_id": tool_call['id'],
+ "role": "tool",
+ "name": function_name,
+ "content": tool_response
+ })
+ payload['messages'] = messages
+ # Make a second call to get the final response
+ second_response = await self.session.post(
+ self.chat_endpoint, json=payload, timeout=self.timeout
+ )
+ if second_response.is_success:
+ self.last_response = second_response.json()
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to execute tool - {second_response.text}"
+ )
+
  self.conversation.update_chat_history(
  prompt, await self.get_message(self.last_response)
  )

  async def for_non_stream():
- response = httpx.post(
+ response = await self.session.post(
  self.chat_endpoint, json=payload, timeout=self.timeout
  )
  if not response.is_success:
- raise Exception(
+ raise exceptions.FailedToGenerateResponseError(
  f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
  )
  resp = response.json()
+
+ # Handle tool calls if any (in non-streaming mode)
+ if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
+ tool_calls = resp['choices'][0]['message']['tool_calls']
+ for tool_call in tool_calls:
+ function_name = tool_call.get('function', {}).get('name')
+ arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
+ if function_name in self.available_functions:
+ tool_response = self.available_functions[function_name](**arguments)
+ messages.append({
+ "tool_call_id": tool_call['id'],
+ "role": "tool",
+ "name": function_name,
+ "content": tool_response
+ })
+ payload['messages'] = messages
+ # Make a second call to get the final response
+ second_response = await self.session.post(
+ self.chat_endpoint, json=payload, timeout=self.timeout
+ )
+ if second_response.is_success:
+ resp = second_response.json()
+ else:
+ raise exceptions.FailedToGenerateResponseError(
+ f"Failed to execute tool - {second_response.text}"
+ )
+
  self.last_response.update(resp)
  self.conversation.update_chat_history(
  prompt, await self.get_message(self.last_response)
@@ -464,6 +595,7 @@ class AsyncGROQ(AsyncProvider):
  stream: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> str | AsyncGenerator:
  """Generate response `str` asynchronously.
  Args:
@@ -471,13 +603,14 @@ class AsyncGROQ(AsyncProvider):
  stream (bool, optional): Flag for streaming response. Defaults to False.
  optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
  conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
  Returns:
  str|AsyncGenerator: Response generated
  """

  async def for_stream():
  async_ask = await self.ask(
- prompt, True, optimizer=optimizer, conversationally=conversationally
+ prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
  )
  async for response in async_ask:
  yield await self.get_message(response)
@@ -489,6 +622,7 @@ class AsyncGROQ(AsyncProvider):
  False,
  optimizer=optimizer,
  conversationally=conversationally,
+ tools=tools
  )
  )
 
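The `tools` parameter added to `GROQ.ask`/`GROQ.chat` above takes OpenAI-style tool definitions; when the model returns `tool_calls`, the provider looks up the matching function registered via `add_function`, appends its result as a `role: "tool"` message, and issues a second request for the final answer. A minimal usage sketch of that flow, assuming a valid Groq API key; the `get_weather` helper and its schema are illustrative placeholders, not part of this release:

```python
# Sketch only: get_weather and its tool schema are hypothetical; the GROQ API
# surface (add_function, system_prompt, tools) is taken from the diff above.
from webscout.Provider.Groq import GROQ

def get_weather(city: str) -> str:
    # Parsed JSON arguments from the tool call arrive as keyword arguments.
    return f"Sunny and 24 C in {city}"

ai = GROQ(
    api_key="your-groq-api-key",   # assumption: a valid key
    model="llama3-70b-8192",       # must be one of AVAILABLE_MODELS
    system_prompt="You are a concise assistant.",
)
ai.add_function("get_weather", get_weather)

weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# If the model emits tool_calls, GROQ runs the registered function and makes a
# second request; the string returned here is the final assistant message.
print(ai.chat("What's the weather in Paris?", tools=[weather_tool]))
```

The registered function's return value is passed verbatim as the tool message `content`, so returning a plain string keeps the follow-up request well-formed.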
webscout/Provider/Llama.py CHANGED
@@ -19,10 +19,10 @@ import io
  import re
  import json
  import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
  from typing import Any, AsyncGenerator, Dict
  import logging
@@ -208,4 +208,12 @@ class LLAMA(Provider):
  str: Message extracted
  """
  assert isinstance(response, dict), "Response should be of dict data-type only"
- return response["message"]
+ return response["message"]
+ if __name__ == "__main__":
+ from rich import print
+
+ ai = LLAMA()
+ # Stream the response
+ response = ai.chat(input(">>> "))
+ for chunk in response:
+ print(chunk, end="", flush=True)
webscout/Provider/Llama3.py CHANGED
@@ -6,6 +6,9 @@ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider

  class LLAMA3(Provider):
+
+ available_models = ["llama3-70b", "llama3-8b", "llama3-405b"]
+
  def __init__(
  self,
  is_conversation: bool = True,
@@ -17,7 +20,7 @@ class LLAMA3(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "llama3-70b", # model= llama3-70b, llama3-8b, llama3-405b
+ model: str = "llama3-8b",
  system: str = "Answer as concisely as possible.",
  ):
  """Instantiates Snova
@@ -35,6 +38,9 @@ class LLAMA3(Provider):
  model (str, optional): Snova model name. Defaults to "llama3-70b".
  system (str, optional): System prompt for Snova. Defaults to "Answer as concisely as possible.".
  """
+ if model not in self.available_models:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.available_models}")
+
  self.session = requests.Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
@@ -170,4 +176,11 @@ class LLAMA3(Provider):
  """
  assert isinstance(response, dict), "Response should be of dict data-type only"
  return response["text"]
-
+ if __name__ == "__main__":
+ from rich import print
+
+ ai = LLAMA3()
+ # Stream the response
+ response = ai.chat(input(">>> "))
+ for chunk in response:
+ print(chunk, end="", flush=True)
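`LLAMA3` now validates `model` against `available_models` at construction time and its default changed to "llama3-8b". A short sketch of the new check, assuming the module path shown in the files-changed list above; the prompt and the invalid model name are illustrative:

```python
# Sketch only: demonstrates the available_models validation added in 4.8.
from webscout.Provider.Llama3 import LLAMA3

ai = LLAMA3(model="llama3-70b")   # accepted: listed in available_models
print(ai.chat("Say hi in one word."))

try:
    LLAMA3(model="llama3-13b")    # rejected: raises ValueError listing the valid choices
except ValueError as err:
    print(err)
```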