webscout-5.5-py3-none-any.whl → webscout-5.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. webscout/Agents/Onlinesearcher.py +3 -3
  2. webscout/Agents/__init__.py +0 -1
  3. webscout/Agents/functioncall.py +3 -3
  4. webscout/Provider/Bing.py +243 -0
  5. webscout/Provider/Chatify.py +1 -1
  6. webscout/Provider/Cloudflare.py +1 -1
  7. webscout/Provider/DARKAI.py +1 -1
  8. webscout/Provider/DiscordRocks.py +109 -246
  9. webscout/Provider/Farfalle.py +1 -1
  10. webscout/Provider/Free2GPT.py +234 -0
  11. webscout/{Agents/ai.py → Provider/GPTWeb.py} +40 -33
  12. webscout/Provider/Llama3.py +65 -62
  13. webscout/Provider/OLLAMA.py +1 -1
  14. webscout/Provider/PizzaGPT.py +1 -1
  15. webscout/Provider/RUBIKSAI.py +13 -3
  16. webscout/Provider/TTI/Nexra.py +120 -0
  17. webscout/Provider/TTI/__init__.py +3 -1
  18. webscout/Provider/TTI/blackboximage.py +153 -0
  19. webscout/Provider/TTI/deepinfra.py +2 -2
  20. webscout/Provider/TeachAnything.py +1 -1
  21. webscout/Provider/Youchat.py +1 -1
  22. webscout/Provider/__init__.py +11 -6
  23. webscout/Provider/{NetFly.py → aigames.py} +76 -79
  24. webscout/Provider/cleeai.py +1 -1
  25. webscout/Provider/elmo.py +1 -1
  26. webscout/Provider/felo_search.py +1 -1
  27. webscout/Provider/genspark.py +1 -1
  28. webscout/Provider/julius.py +7 -1
  29. webscout/Provider/lepton.py +1 -1
  30. webscout/Provider/meta.py +1 -1
  31. webscout/Provider/turboseek.py +1 -1
  32. webscout/Provider/upstage.py +230 -0
  33. webscout/Provider/x0gpt.py +1 -1
  34. webscout/Provider/xdash.py +1 -1
  35. webscout/Provider/yep.py +2 -2
  36. webscout/version.py +1 -1
  37. webscout/webai.py +1 -1
  38. {webscout-5.5.dist-info → webscout-5.6.dist-info}/METADATA +5 -29
  39. {webscout-5.5.dist-info → webscout-5.6.dist-info}/RECORD +43 -39
  40. webscout/Provider/ThinkAnyAI.py +0 -219
  41. {webscout-5.5.dist-info → webscout-5.6.dist-info}/LICENSE.md +0 -0
  42. {webscout-5.5.dist-info → webscout-5.6.dist-info}/WHEEL +0 -0
  43. {webscout-5.5.dist-info → webscout-5.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-5.5.dist-info → webscout-5.6.dist-info}/top_level.txt +0 -0
webscout/Provider/Free2GPT.py (new file)
@@ -0,0 +1,234 @@
+ import requests
+ import uuid
+ import json
+ import time
+ from hashlib import sha256
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+
+ class Free2GPT(Provider):
+     """
+     A class to interact with the Free2GPT API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """
+         Initializes the Free2GPT API with given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt for Free2GPT.
+                 Defaults to "You are a helpful AI assistant.".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://chat10.free2gpt.xyz/api/generate"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "text/plain;charset=UTF-8",
+             "dnt": "1",
+             "origin": "https://chat10.free2gpt.xyz",
+             "referer": "https://chat10.free2gpt.xyz/",
+             "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0"
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def generate_signature(self, time: int, text: str, secret: str = ""):
+         message = f"{time}:{text}:{secret}"
+         return sha256(message.encode()).hexdigest()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Generate timestamp
+         timestamp = int(time.time() * 1e3)
+
+         # Generate signature
+         signature = self.generate_signature(timestamp, conversation_prompt)
+
+         payload = {
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": self.system_prompt
+                 },
+                 {
+                     "role": "user",
+                     "content": conversation_prompt
+                 }
+             ],
+             "time": timestamp,
+             "pass": None,
+             "sign": signature
+         }
+
+         def for_stream():
+             try:
+                 # Send the POST request with streaming enabled
+                 with requests.post(self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True) as response:
+                     response.raise_for_status()
+
+                     full_response = ""
+                     for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
+                         if chunk:
+                             full_response += chunk.decode('utf-8')
+                             yield chunk.decode('utf-8') if raw else dict(text=full_response)
+
+                     self.last_response.update(dict(text=full_response))
+                     self.conversation.update_chat_history(
+                         prompt, self.get_message(self.last_response)
+                     )
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     ai = Free2GPT()
+     response = ai.chat('hi')
+     for chunk in response:
+         print(chunk, end="", flush=True)
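The new provider signs each request with `generate_signature`, i.e. `sha256(f"{timestamp}:{prompt}:")`, and streams plain-text chunks back. A minimal usage sketch (the module path follows the file list above; note that `for_stream` above yields the accumulated text so far, not deltas):

```python
from webscout.Provider.Free2GPT import Free2GPT  # path per file list; re-export from webscout.Provider not shown in this diff

ai = Free2GPT(system_prompt="You are a helpful AI assistant.", timeout=30)

# Non-streaming: chat() returns the complete reply as a str.
print(ai.chat("What is the capital of France?"))

# Streaming: each yielded value is the full response accumulated so far
# (see for_stream above), so print only the newly added tail.
seen = ""
for snapshot in ai.chat("Tell me a short joke.", stream=True):
    print(snapshot[len(seen):], end="", flush=True)
    seen = snapshot
print()
```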
webscout/{Agents/ai.py → Provider/GPTWeb.py}
@@ -1,13 +1,15 @@
  import requests
  import json
+
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider

- class LLAMA3(Provider):
-
-     AVAILABLE_MODELS = ["llama3-70b", "llama3-8b", "llama3-405b"]
+ class GPTWeb(Provider):
+     """
+     A class to interact with the Nexra GPTWeb API.
+     """

      def __init__(
          self,
@@ -20,10 +22,10 @@ class LLAMA3(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "llama3-8b",
-         system: str = "GPT syle",
+
      ):
-         """Instantiates Snova
+         """
+         Initializes the Nexra GPTWeb API with given parameters.

          Args:
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
@@ -35,21 +37,18 @@ class LLAMA3(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Snova model name. Defaults to "llama3-70b".
-             system (str, optional): System prompt for Snova. Defaults to "Answer as concisely as possible.".
+             system_prompt (str, optional): System prompt for GPTWeb. Defaults to "You are a helpful AI assistant.".
          """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = 'https://nexra.aryahcr.cc/api/chat/gptweb'
+         self.stream_chunk_size = 64
          self.timeout = timeout
-         self.model = model
-         self.system = system
          self.last_response = {}
-         self.env_type = "tp16405b" if "405b" in model else "tp16"
-         self.headers = {'content-type': 'application/json'}
+         self.headers = {
+             "Content-Type": "application/json"
+         }

          self.__available_optimizers = (
              method
@@ -78,7 +77,7 @@ class LLAMA3(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> dict:
-         """Chat with AI
+         """Chat with GPTWeb

          Args:
              prompt (str): Prompt to be send.
@@ -104,25 +103,34 @@ class LLAMA3(Provider):
              raise Exception(
                  f"Optimizer is not one of {self.__available_optimizers}"
              )
-         data = {'body': {'messages': [{'role': 'system', 'content': self.system}, {'role': 'user', 'content': conversation_prompt}], 'stream': True, 'model': self.model}, 'env_type': self.env_type}

-         def for_stream(data=data): # Pass data as a default argument
-             response = self.session.post('https://fast.snova.ai/api/completion', headers=self.headers, json=data, stream=True, timeout=self.timeout)
-             output = ''
+         data = {
+             "prompt": conversation_prompt,
+             "markdown": False
+         }
+
+         def for_stream():
+             response = self.session.post(self.api_endpoint, headers=self.headers, data=json.dumps(data), stream=True, timeout=self.timeout)
+             if not response.ok:
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             full_response = ''
              for line in response.iter_lines(decode_unicode=True):
-                 if line.startswith('data:'):
+                 if line:
+                     line = line.lstrip('_') # Remove "_"
                      try:
-                         data = json.loads(line[len('data: '):])
-                         output += data.get("choices", [{}])[0].get("delta", {}).get("content", '')
-                         self.last_response.update(dict(text=output))
-                         yield data if raw else dict(text=output)
+                         # Attempt to parse the entire line as JSON
+                         json_data = json.loads(line)
+                         full_response = json_data.get("gpt", "")
+                         yield full_response if raw else dict(text=full_response)
                      except json.JSONDecodeError:
-                         if line[len('data: '):] == '[DONE]':
-                             break
+                         print(f"Skipping invalid JSON line: {line}")
+             self.last_response.update(dict(text=full_response))
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
              )
-
          def for_non_stream():
              for _ in for_stream():
                  pass
@@ -176,11 +184,10 @@ class LLAMA3(Provider):
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
- if __name__ == "__main__":
-     from rich import print

-     ai = LLAMA3()
-     # Stream the response
+ if __name__ == '__main__':
+     from rich import print
+     ai = GPTWeb()
      response = ai.chat(input(">>> "))
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end='', flush=True)
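For debugging, the request `GPTWeb.ask()` now builds can be reproduced standalone: it POSTs `{"prompt": ..., "markdown": False}` to the Nexra endpoint, strips the underscore padding the API prepends, and reads the `gpt` field of each JSON line. A sketch of that round trip (unauthenticated endpoint; server behavior may change without notice):

```python
import json
import requests

# Standalone reproduction of the request assembled in GPTWeb.ask() above.
resp = requests.post(
    "https://nexra.aryahcr.cc/api/chat/gptweb",
    headers={"Content-Type": "application/json"},
    data=json.dumps({"prompt": "Hello!", "markdown": False}),
    stream=True,
    timeout=30,
)
resp.raise_for_status()
for line in resp.iter_lines(decode_unicode=True):
    if not line:
        continue
    line = line.lstrip("_")  # the endpoint pads lines with leading underscores
    try:
        print(json.loads(line).get("gpt", ""))
    except json.JSONDecodeError:
        pass  # incomplete line; GPTWeb.ask() logs and skips these
```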
webscout/Provider/Llama3.py
@@ -1,18 +1,30 @@
+ import os
+ import openai
  import requests
- import json
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider

  class LLAMA3(Provider):
+     """
+     A class to interact with the Sambanova API using the openai library.
+     """

-     AVAILABLE_MODELS = ["llama3-70b", "llama3-8b", "llama3-405b"]
+     AVAILABLE_MODELS = [
+         "Meta-Llama-3.1-8B-Instruct",
+         "Meta-Llama-3.1-70B-Instruct",
+         "Meta-Llama-3.1-405B-Instruct"
+     ]

      def __init__(
          self,
+         api_key: str = None,
          is_conversation: bool = True,
          max_tokens: int = 600,
+         temperature: float = 1,
+         top_p: float = 0.95,
+         model: str = "Meta-Llama-3.1-8B-Instruct",
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -20,14 +32,18 @@ class LLAMA3(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "llama3-8b",
-         system: str = "GPT syle",
+         system_prompt: str = "You are a helpful AI assistant.",
      ):
-         """Instantiates Snova
+         """
+         Initializes the Sambanova API with the given parameters.

          Args:
+             api_key (str, optional): Your Sambanova API key. If None, it will use the environment variable "SAMBANOVA_API_KEY". Defaults to None.
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
              max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): The temperature parameter for the model. Defaults to 1.
+             top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
+             model (str, optional): The name of the Sambanova model to use. Defaults to "Meta-Llama-3.1-8B-Instruct".
              timeout (int, optional): Http request timeout. Defaults to 30.
              intro (str, optional): Conversation introductory prompt. Defaults to None.
              filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -35,28 +51,29 @@ class LLAMA3(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Snova model name. Defaults to "llama3-70b".
-             system (str, optional): System prompt for Snova. Defaults to "Answer as concisely as possible.".
+             system_prompt (str, optional): System instruction to guide the AI's behavior.
+                 Defaults to "You are a helpful and informative AI assistant.".
          """
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-         self.session = requests.Session()
+         self.api_key = api_key or os.environ["SAMBANOVA_API_KEY"]
+         self.model = model
+         self.temperature = temperature
+         self.top_p = top_p
+         self.system_prompt = system_prompt # Add this line to set the system_prompt attribute
+
+         self.session = requests.Session() # Not directly used for Gemini API calls, but can be used for other requests
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.timeout = timeout
-         self.model = model
-         self.system = system
          self.last_response = {}
-         self.env_type = "tp16405b" if "405b" in model else "tp16"
-         self.headers = {'content-type': 'application/json'}

          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
-         self.session.headers.update(self.headers)
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -70,6 +87,12 @@ class LLAMA3(Provider):
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies

+         # Configure the Sambanova API
+         self.client = openai.OpenAI(
+             api_key=self.api_key,
+             base_url="https://api.sambanova.ai/v1",
+         )
+
      def ask(
          self,
          prompt: str,
@@ -82,8 +105,8 @@ class LLAMA3(Provider):

          Args:
              prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             stream (bool, optional): Not used for Sambanova API. Defaults to False.
+             raw (bool, optional): Not used for Sambanova API. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
              conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
@@ -104,66 +127,47 @@ class LLAMA3(Provider):
              raise Exception(
                  f"Optimizer is not one of {self.__available_optimizers}"
              )
-         data = {'body': {'messages': [{'role': 'system', 'content': self.system}, {'role': 'user', 'content': conversation_prompt}], 'stream': True, 'model': self.model}, 'env_type': self.env_type}
-
-         def for_stream(data=data): # Pass data as a default argument
-             response = self.session.post('https://fast.snova.ai/api/completion', headers=self.headers, json=data, stream=True, timeout=self.timeout)
-             output = ''
-             for line in response.iter_lines(decode_unicode=True):
-                 if line.startswith('data:'):
-                     try:
-                         data = json.loads(line[len('data: '):])
-                         output += data.get("choices", [{}])[0].get("delta", {}).get("content", '')
-                         self.last_response.update(dict(text=output))
-                         yield data if raw else dict(text=output)
-                     except json.JSONDecodeError:
-                         if line[len('data: '):] == '[DONE]':
-                             break
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )

-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
+         response = self.client.chat.completions.create(
+             model=self.model,
+             messages=[
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             temperature=self.temperature,
+             top_p=self.top_p
+         )

-         return for_stream() if stream else for_non_stream()
+         self.last_response.update(dict(text=response.choices[0].message.content))
+         self.conversation.update_chat_history(
+             prompt, self.get_message(self.last_response)
+         )
+         return self.last_response

      def chat(
          self,
          prompt: str,
-         stream: bool = False,
+         stream: bool = False, # Streaming not supported by the current google-generativeai library
          optimizer: str = None,
          conversationally: bool = False,
      ) -> str:
          """Generate response `str`
+
          Args:
              prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
+             stream (bool, optional): Not used for Sambanova API. Defaults to False.
              optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
              conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
              str: Response generated
          """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
+         return self.get_message(
+             self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
              )
-
-         return for_stream() if stream else for_non_stream()
+         )

      def get_message(self, response: dict) -> str:
          """Retrieves message only from response
@@ -176,11 +180,10 @@ class LLAMA3(Provider):
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
+
  if __name__ == "__main__":
      from rich import print
-
-     ai = LLAMA3()
-     # Stream the response
+     ai = LLAMA3(api_key='7979b01c-c5ea-40df-9198-f45733fa2208')
      response = ai.chat(input(">>> "))
-     for chunk in response:
-         print(chunk, end="", flush=True)
+     for chunks in response:
+         print(chunks, end="", flush=True)
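Note that the `__main__` demo above hardcodes what appears to be a real API key. Since `__init__` already falls back to `os.environ["SAMBANOVA_API_KEY"]` when `api_key` is None, a safer equivalent keeps the key out of the source (a sketch, assuming a valid key is exported in the environment):

```python
import os

from webscout.Provider.Llama3 import LLAMA3

# __init__ above falls back to os.environ["SAMBANOVA_API_KEY"] when
# api_key is None, so export the key instead of hardcoding it.
assert "SAMBANOVA_API_KEY" in os.environ, "export SAMBANOVA_API_KEY first"

ai = LLAMA3(model="Meta-Llama-3.1-8B-Instruct", temperature=0.7)
# ask() now makes a single non-streaming completions call, so chat() returns
# the whole reply at once; iterating it (as the demo does) walks characters.
print(ai.chat("Summarize the webscout project in one sentence."))
```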
webscout/Provider/OLLAMA.py
@@ -166,7 +166,7 @@ class OLLAMA(Provider):
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
  if __name__ == "__main__":
-     ollama_provider = OLLAMA(model="qwen:0.5b")
+     ollama_provider = OLLAMA(model="qwen2:0.5b")
      response = ollama_provider.chat("hi", stream=True)
      for r in response:
          print(r, end="", flush=True)
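The demo's tag change tracks Ollama's model naming (`qwen2:0.5b` is the Qwen2 release); the model still has to be pulled locally before the provider can use it. A sketch, assuming a default local Ollama daemon:

```python
# Prerequisite (shell): ollama pull qwen2:0.5b
from webscout.Provider.OLLAMA import OLLAMA

ollama_provider = OLLAMA(model="qwen2:0.5b")
for r in ollama_provider.chat("hi", stream=True):
    print(r, end="", flush=True)
```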
webscout/Provider/PizzaGPT.py
@@ -179,6 +179,6 @@ if __name__ == "__main__":

      ai = PIZZAGPT()
      # Stream the response
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/RUBIKSAI.py
@@ -1,18 +1,23 @@
  import requests
  import json
  from typing import Any, Dict, Optional
+
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider
  from webscout import exceptions

-
  class RUBIKSAI(Provider):
      """
      A class to interact with the Rubiks.ai API.
      """

+     AVAILABLE_MODELS = [
+         "gpt-4o-mini",
+         "gemini-1.5-pro"
+     ]
+
      def __init__(
          self,
          is_conversation: bool = True,
@@ -41,8 +46,12 @@ class RUBIKSAI(Provider):
              history_offset (int, optional): Limit conversation history to this number of last texts.
                  Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
+             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
+                 Available models: "gpt-4o-mini", "gemini-1.5-pro"
          """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
@@ -200,8 +209,9 @@ class RUBIKSAI(Provider):
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
  if __name__ == '__main__':
+
      from rich import print
      ai = RUBIKSAI()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("hi")
      for chunk in response:
          print(chunk, end="", flush=True)
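With the new `AVAILABLE_MODELS` guard, an unsupported model name now fails fast in the constructor instead of at request time. A quick sketch of both paths (module path per the file list above):

```python
from webscout.Provider.RUBIKSAI import RUBIKSAI

try:
    RUBIKSAI(model="gpt-4o")  # not in AVAILABLE_MODELS -> immediate ValueError
except ValueError as err:
    print(err)

ai = RUBIKSAI(model="gemini-1.5-pro")  # accepted
for chunk in ai.chat("hi"):
    print(chunk, end="", flush=True)
```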