webscout 4.5-py3-none-any.whl → 4.7-py3-none-any.whl

This diff compares the content of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.

Potentially problematic release. This version of webscout might be problematic.

webscout/Provider/RUBIKSAI.py (new file; path inferred from the "from .RUBIKSAI import *" re-export in the __init__.py hunk below)
@@ -0,0 +1,201 @@
+ import requests
+ import json
+ from typing import Any, Dict, Optional
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider
+ from webscout import exceptions
+
+
+ class RUBIKSAI(Provider):
+     """
+     A class to interact with the Rubiks.ai API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4o-mini",
+     ) -> None:
+         """
+         Initializes the RUBIKSAI API with given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                 Defaults to 600.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts.
+                 Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://rubiks.ai/search/api.php"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "cache-control": "no-cache",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Sends a prompt to the Rubiks.ai API and returns the response.
+
+         Args:
+             prompt: The text prompt to generate text from.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         params = {
+             "q": conversation_prompt,
+             "model": self.model,
+         }
+
+         def for_stream():
+             response = self.session.get(
+                 self.api_endpoint, params=params, headers=self.headers, stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     if line.startswith("data: "):
+                         json_data = line[6:]
+                         if json_data == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_data)
+                             if "choices" in data and len(data["choices"]) > 0:
+                                 content = data["choices"][0]["delta"].get("content", "")
+                                 streaming_response += content
+                                 yield content if raw else dict(text=streaming_response)
+                         except json.decoder.JSONDecodeError:
+                             continue
+
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
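
Read together with the __init__.py hunk below, the new provider can be exercised roughly as follows. This is a minimal sketch, not code from the package; the import path webscout.Provider is an assumption based on the re-exports in the next hunk.

    from webscout.Provider import RUBIKSAI  # assumed import path

    ai = RUBIKSAI(timeout=30, model="gpt-4o-mini")

    # Non-streaming: chat() returns the complete reply as a str.
    print(ai.chat("Hello"))

    # Streaming: each yielded value is the text accumulated so far,
    # because ask() builds streaming_response cumulatively before yielding.
    for text in ai.chat("Hello", stream=True):
        print(text)
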
webscout/Provider/__init__.py (path inferred from the provider imports in context)
@@ -39,6 +39,13 @@ from .Geminiflash import GEMINIFLASH
  from .OLLAMA import OLLAMA
  from .FreeGemini import FreeGemini
  from .Andi import AndiSearch
+ from .PizzaGPT import *
+ from .Llama3 import *
+ from .DARKAI import *
+ from .koala import *
+ from .RUBIKSAI import *
+ from .meta import *
+
  __all__ = [
      'ThinkAnyAI',
      'Xjai',
@@ -80,7 +87,11 @@ __all__ = [
      'GEMINIFLASH',
      'OLLAMA',
      'FreeGemini',
-     'AndiSearch'
-
-
+     'AndiSearch',
+     'PIZZAGPT',
+     'LLAMA3',
+     'DARKAI',
+     'KOALA',
+     'RUBIKSAI',
+     'Meta',
  ]
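
With these re-exports in place, the new providers should be importable directly from the package, e.g. from webscout.Provider import KOALA, RUBIKSAI (the webscout.Provider path is an assumption based on the surrounding imports).
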
webscout/Provider/koala.py (new file; path inferred from the "from .koala import *" re-export above)
@@ -0,0 +1,239 @@
+ import requests
+ import json
+ from typing import Any, Dict, Optional
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider
+ from webscout import exceptions
+
+ class KOALA(Provider):
+     """
+     A class to interact with the Koala.sh API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4o-mini",
+         web_search: bool = True,
+
+     ) -> None:
+         """
+         Initializes the Koala.sh API with given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                 Defaults to 600.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts.
+                 Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://koala.sh/api/gpt/"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-length": "73",
+             "content-type": "application/json",
+             "dnt": "1",
+             "flag-real-time-data": "true" if web_search else "false",
+             "origin": "https://koala.sh",
+             "priority": "u=1, i",
+             "referer": "https://koala.sh/chat",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Sends a prompt to the Koala.sh API and returns the response.
+
+         Args:
+             prompt: The text prompt to generate text from.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "input": conversation_prompt,
+             "model": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     if line.startswith("data:"):
+                         data = line[len("data:"):].strip()
+                         if data:
+                             try:
+                                 event = json.loads(data)
+                                 streaming_response += event.get("choices", [{}])[0].get("delta", {}).get("content", "")
+                                 yield event if raw else dict(text=streaming_response)
+                             except json.decoder.JSONDecodeError:
+                                 continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+         def for_non_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, headers=self.headers, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+
+             response_content = response.content.decode('utf-8')
+             data_parts = response_content.strip().split('\n\n')
+             formatted_response = ''.join([part.replace('data: ', '') for part in data_parts if part.startswith('data: ')])
+
+             # Remove extra quotes from the formatted response
+             formatted_response = formatted_response.replace('""', '')
+
+             # Split the response into lines and format with new lines before headers
+             lines = formatted_response.split('\n')
+             formatted_lines = []
+             for line in lines:
+                 if line.startswith('###'):
+                     formatted_lines.append('\n' + line)
+                 else:
+                     formatted_lines.append(line)
+
+             # Join the formatted lines back into a single string
+             final_response = '\n'.join(formatted_lines)
+
+             # self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, final_response
+             )
+             return dict(text=final_response)
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
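
KOALA implements the same Provider interface, so usage mirrors the sketch above (again a hedged sketch, assuming the webscout.Provider import path):

    from webscout.Provider import KOALA  # assumed import path

    ai = KOALA(web_search=True)  # web_search toggles the "flag-real-time-data" header

    # Non-streaming: for_non_stream() reassembles the SSE body into one string.
    print(ai.chat("What is the capital of France?"))

    # Streaming behaves as in RUBIKSAI: each yield is the accumulated text.
    for text in ai.chat("Tell me a joke", stream=True):
        print(text)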