webscout 6.9__py3-none-any.whl → 7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -2,30 +2,91 @@ import re
  import requests
  from uuid import uuid4
  import json
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
+ from typing import Any, Dict, Generator, Optional
+
+ from webscout.AIbase import AISearch
  from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
+ from webscout import LitAgent
+
+
+ class Response:
+     """A wrapper class for Felo API responses.
+
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+
+     Attributes:
+         text (str): The text content of the response
+
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+
+     def __str__(self):
+         return self.text
+
+     def __repr__(self):
+         return self.text
 
 
- class Felo(Provider):
+ class Felo(AISearch):
+     """A class to interact with the Felo AI search API.
+
+     Felo provides a powerful search interface that returns AI-generated responses
+     based on web content. It supports both streaming and non-streaming responses.
+
+     Basic Usage:
+         >>> from webscout import Felo
+         >>> ai = Felo()
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ...     print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ...     print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+
+     Args:
+         timeout (int, optional): Request timeout in seconds. Defaults to 30.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+     Attributes:
+         api_endpoint (str): The Felo API endpoint URL.
+         stream_chunk_size (int): Size of chunks when streaming responses.
+         timeout (int): Request timeout in seconds.
+         headers (dict): HTTP headers used in requests.
+     """
+
      def __init__(
          self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
          timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
+         proxies: Optional[dict] = None,
      ):
+         """Initialize the Felo API client.
+
+         Args:
+             timeout (int, optional): Request timeout in seconds. Defaults to 30.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+         Example:
+             >>> ai = Felo(timeout=60)  # Longer timeout
+             >>> ai = Felo(proxies={'http': 'http://proxy.com:8080'})  # With proxy
+         """
          self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
          self.chat_endpoint = "https://api.felo.ai/search/threads"
          self.stream_chunk_size = 64
          self.timeout = timeout
@@ -45,50 +106,64 @@ class Felo(Provider):
              "sec-fetch-dest": "empty",
              "sec-fetch-mode": "cors",
              "sec-fetch-site": "same-site",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+             "user-agent": LitAgent().random()
          }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
          self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
+         self.proxies = proxies
 
-     def ask(
+     def search(
          self,
          prompt: str,
          stream: bool = False,
          raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
+     ) -> Dict[str, Any] | Generator[str, None, None]:
+         """Search using the Felo API and get AI-generated responses.
+
+         This method sends a search query to Felo and returns the AI-generated response.
+         It supports both streaming and non-streaming modes, as well as raw response format.
+
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+
+         Returns:
+             Union[Dict[str, Any], Generator[str, None, None]]:
+                 - If stream=False: Returns complete response
+                 - If stream=True: Yields response chunks as they arrive
+
+         Raises:
+             APIConnectionError: If the API request fails
+
+         Examples:
+             Basic search:
+             >>> ai = Felo()
+             >>> response = ai.search("What is Python?")
+             >>> print(response)
+             Python is a programming language...
+
+             Streaming response:
+             >>> for chunk in ai.search("Tell me about AI", stream=True):
+             ...     print(chunk, end="")
+             Artificial Intelligence...
+
+             Raw response format:
+             >>> for chunk in ai.search("Hello", stream=True, raw=True):
+             ...     print(chunk)
+             {'text': 'Hello'}
+             {'text': ' there!'}
+
+             Error handling:
+             >>> try:
+             ...     response = ai.search("My question")
+             ... except exceptions.APIConnectionError as e:
+             ...     print(f"API error: {e}")
+         """
          payload = {
-             "query": conversation_prompt,
+             "query": prompt,
              "search_uuid": uuid4().hex,
              "lang": "",
              "agent_lang": "en",
@@ -100,81 +175,55 @@ class Felo(Provider):
          }
 
          def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
+             try:
+                 with self.session.post(
+                     self.chat_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                 ) as response:
+                     if not response.ok:
+                         raise exceptions.APIConnectionError(
+                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
 
-             streaming_text = ""
-             for line in response.iter_lines(decode_unicode=True):
-                 if line.startswith('data:'):
-                     try:
-                         data = json.loads(line[5:].strip())
-                         if data['type'] == 'answer' and 'text' in data['data']:
-                             new_text = data['data']['text']
-                             if len(new_text) > len(streaming_text):
-                                 delta = new_text[len(streaming_text):]
-                                 streaming_text = new_text
-                                 resp = dict(text=delta)
-                                 self.last_response.update(dict(text=streaming_text))
-                                 yield line if raw else resp
-                     except json.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line.startswith('data:'):
+                             try:
+                                 data = json.loads(line[5:].strip())
+                                 if data['type'] == 'answer' and 'text' in data['data']:
+                                     new_text = data['data']['text']
+                                     if len(new_text) > len(streaming_text):
+                                         delta = new_text[len(streaming_text):]
+                                         streaming_text = new_text
+                                         if raw:
+                                             yield {"text": delta}
+                                         else:
+                                             yield Response(delta)
+                             except json.JSONDecodeError:
+                                 pass
+                     self.last_response = Response(streaming_text)
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.APIConnectionError(f"Request failed: {e}")
 
          def for_non_stream():
              full_response = ""
              for chunk in for_stream():
-                 if not raw:
-                     full_response += chunk['text']
-             self.last_response = dict(text=full_response)
-             return self.last_response
-
+                 if raw:
+                     yield chunk
+                 else:
+                     full_response += str(chunk)
+             if not raw:
+                 self.last_response = Response(full_response)
+                 return self.last_response
+
          return for_stream() if stream else for_non_stream()
 
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-
-         if "text" in response:
-             text = re.sub(r'\[\[\d+\]\]', '', response["text"])
-             return text
-         else:
-             return ""
 
- if __name__ == '__main__':
+ if __name__ == "__main__":
      from rich import print
      ai = Felo()
-     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
+     response = ai.search(input(">>> "), stream=True, raw=False)
      for chunk in response:
          print(chunk, end="", flush=True)
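
The rewritten `for_stream` above assumes that Felo's SSE `answer` events carry the cumulative answer text so far, not per-event deltas; each chunk is therefore recovered by slicing off the already-emitted prefix. A minimal standalone sketch of that reconstruction, with hypothetical hard-coded events standing in for a live response:

```python
import json

# Hypothetical SSE payloads: each 'answer' event repeats the full text so far,
# mirroring what the Felo stream-parsing loop in the diff above expects.
events = [
    'data: {"type": "answer", "data": {"text": "Hello"}}',
    'data: {"type": "answer", "data": {"text": "Hello, world"}}',
    'data: {"type": "answer", "data": {"text": "Hello, world!"}}',
]

streaming_text = ""
for line in events:
    if line.startswith("data:"):
        try:
            data = json.loads(line[5:].strip())
            if data["type"] == "answer" and "text" in data["data"]:
                new_text = data["data"]["text"]
                # Only the suffix beyond what was already emitted is new.
                if len(new_text) > len(streaming_text):
                    delta = new_text[len(streaming_text):]
                    streaming_text = new_text
                    print(delta, end="")  # emits "Hello", ", world", "!"
        except json.JSONDecodeError:
            pass
```

Because each event restates the full text, comparing lengths before slicing (rather than appending blindly) presumably keeps a repeated event from producing duplicate output.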
@@ -41,7 +41,7 @@ class BLACKBOXAI:
          self.session = requests.Session()
          self.max_tokens_to_sample = max_tokens
          self.is_conversation = is_conversation
-         self.chat_endpoint = "https://www.blackbox.ai/api/chat"
+         self.chat_endpoint = "https://api.blackbox.ai/api/chat"
          self.stream_chunk_size = 64
          self.timeout = timeout
          self.last_response = {}
@@ -0,0 +1,207 @@
+ import requests
+ import json
+ from typing import Any, Dict, Generator, Optional
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent as Lit
+ class GliderAI(Provider):
+     """
+     A class to interact with the Glider.so API.
+     """
+
+     AVAILABLE_MODELS = [
+         "chat-llama-3-1-70b",
+         "chat-llama-3-1-8b",
+         "chat-llama-3-2-3b",
+         "deepseek-ai/DeepSeek-R1",
+     ]
+
+     model_aliases = {
+         "llama-3.1-70b": "chat-llama-3-1-70b",
+         "llama-3.1-8b": "chat-llama-3-1-8b",
+         "llama-3.2-3b": "chat-llama-3-2-3b",
+         "deepseek-r1": "deepseek-ai/DeepSeek-R1",
+     }
+
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "llama-3.1-70b",
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """Initializes the GliderAI API client."""
+         if model not in self.AVAILABLE_MODELS and model not in self.model_aliases:
+             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://glider.so/api/chat"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = self.model_aliases.get(model, model)
+         self.system_prompt = system_prompt
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://glider.so",
+             "referer": "https://glider.so/",
+             "user-agent": Lit().random(),
+         }
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any] | Generator:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+             ```json
+             {
+                 "text" : "How may I assist you today?"
+             }
+             ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [
+                 {"role": "user", "content": conversation_prompt},
+                 {"role": "system", "content": self.system_prompt}
+             ],
+             "model": self.model,
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_text = ""
+             for value in response.iter_lines(decode_unicode=True):
+                 if value:
+                     if value.startswith("data: "):
+                         try:
+                             data = json.loads(value[6:])
+                             content = data['choices'][0].get('delta', {}).get("content", "")
+                             if content:
+                                 streaming_text += content
+                                 yield content if raw else dict(text=content)
+                         except json.JSONDecodeError:
+                             if "stop" in value:
+                                 break
+
+             self.last_response.update(dict(text=streaming_text))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = GliderAI(model="llama-3.1-70b")
+     response = ai.chat("Meaning of Life", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
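
The new `GliderAI.ask` parses an OpenAI-style SSE stream: each `data: ` line carries a JSON chunk whose `choices[0].delta.content` holds the next text fragment, appended directly to the running answer. A small sketch of that decoding loop, with fabricated chunks standing in for a live glider.so response (the terminator line here is an assumption, not the documented wire format):

```python
import json

# Hypothetical SSE lines shaped like what GliderAI.ask() expects from
# https://glider.so/api/chat; the exact terminator is assumed.
lines = [
    'data: {"choices": [{"delta": {"content": "The answer"}}]}',
    'data: {"choices": [{"delta": {"content": " is 42."}}]}',
    'data: stop',  # non-JSON line; the loop breaks when it contains "stop"
]

streaming_text = ""
for value in lines:
    if value.startswith("data: "):
        try:
            data = json.loads(value[6:])
            content = data["choices"][0].get("delta", {}).get("content", "")
            if content:
                streaming_text += content
        except json.JSONDecodeError:
            # Mirrors the upstream loop: a non-JSON "stop" line ends the stream.
            if "stop" in value:
                break

print(streaming_text)  # -> "The answer is 42."
```

Unlike Felo's cumulative stream, these deltas arrive as disjoint fragments, so the provider concatenates them in order rather than diffing against previously seen text.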