webscout 7.9__py3-none-any.whl → 8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (38) hide show
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/__init__.py +5 -1
  8. webscout/Provider/AISEARCH/hika_search.py +194 -0
  9. webscout/Provider/AISEARCH/monica_search.py +246 -0
  10. webscout/Provider/AISEARCH/scira_search.py +320 -0
  11. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  12. webscout/Provider/AllenAI.py +255 -122
  13. webscout/Provider/DeepSeek.py +1 -2
  14. webscout/Provider/Deepinfra.py +17 -9
  15. webscout/Provider/ExaAI.py +261 -0
  16. webscout/Provider/ExaChat.py +8 -1
  17. webscout/Provider/GithubChat.py +2 -1
  18. webscout/Provider/Netwrck.py +3 -2
  19. webscout/Provider/OpenGPT.py +199 -0
  20. webscout/Provider/PI.py +39 -24
  21. webscout/Provider/Youchat.py +326 -296
  22. webscout/Provider/__init__.py +10 -0
  23. webscout/Provider/ai4chat.py +58 -56
  24. webscout/Provider/akashgpt.py +34 -22
  25. webscout/Provider/freeaichat.py +1 -1
  26. webscout/Provider/labyrinth.py +121 -20
  27. webscout/Provider/llmchatco.py +306 -0
  28. webscout/Provider/scira_chat.py +271 -0
  29. webscout/Provider/typefully.py +280 -0
  30. webscout/version.py +1 -1
  31. webscout/webscout_search.py +118 -54
  32. webscout/webscout_search_async.py +109 -45
  33. {webscout-7.9.dist-info → webscout-8.0.dist-info}/METADATA +2 -2
  34. {webscout-7.9.dist-info → webscout-8.0.dist-info}/RECORD +38 -24
  35. {webscout-7.9.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
  36. {webscout-7.9.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
  37. {webscout-7.9.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
  38. {webscout-7.9.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,246 @@
1
+ import requests
2
+ import json
3
+ import re
4
+ import uuid
5
+ from typing import Dict, Optional, Generator, Union, Any
6
+
7
+ from webscout.AIbase import AISearch
8
+ from webscout import exceptions
9
+ from webscout.litagent import LitAgent
10
+
11
+
12
class Response:
    """Lightweight wrapper around a Monica answer fragment.

    Stringifying an instance yields the underlying text, so these
    objects can be printed or joined without any extra unwrapping.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> chunk = Response("Hello, world!")
        >>> print(chunk)
        Hello, world!
        >>> str(chunk)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
36
+
37
+
38
class Monica(AISearch):
    """A class to interact with the Monica AI search API.

    Monica provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses.

    Basic Usage:
        >>> from webscout import Monica
        >>> ai = Monica()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 60.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 60,
        proxies: Optional[dict] = None,
    ):
        """Initialize the Monica API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 60.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        """
        self.session = requests.Session()
        self.api_endpoint = "https://monica.so/api/search_v1/search"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        # A fresh client id per instance; the session id is filled in from
        # the first SSE event that carries one, so follow-up queries reuse it.
        self.client_id = str(uuid.uuid4())
        self.session_id = ""

        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://monica.so",
            "referer": "https://monica.so/answers",
            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
            "x-client-id": self.client_id,
            "x-client-locale": "en",
            "x-client-type": "web",
            "x-client-version": "5.4.3",
            "x-from-channel": "NA",
            "x-product-name": "Monica-Search",
            "x-time-zone": "Asia/Calcutta;-330"
        }

        self.cookies = {
            "monica_home_theme": "auto",
        }

        self.session.headers.update(self.headers)
        self.proxies = proxies

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Dict[str, str], Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the Monica API and get AI-generated responses.

        This method sends a search query to Monica and returns the AI-generated response.
        It supports both streaming and non-streaming modes, as well as raw response format.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw dictionaries with a 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.

        Returns:
            Union[Response, Dict[str, str], Generator]:
                - If stream=True: a generator yielding Dict (raw=True) or Response chunks
                - If stream=False, raw=False: the complete response as a Response object
                - If stream=False, raw=True: the complete response as {'text': ...}

        Raises:
            APIConnectionError: If the API request fails
        """
        task_id = str(uuid.uuid4())
        payload = {
            "pro": False,
            "query": prompt,
            "round": 1,
            "session_id": self.session_id,
            "language": "auto",
            "task_id": task_id
        }

        def for_stream():
            try:
                with self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    cookies=self.cookies,
                    timeout=self.timeout,
                    proxies=self.proxies
                ) as response:
                    if not response.ok:
                        raise exceptions.APIConnectionError(
                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                        )

                    # Process the Server-Sent Events (SSE) stream
                    for line in response.iter_lines(decode_unicode=True):
                        if not (line and line.startswith("data: ")):
                            continue
                        try:
                            data = json.loads(line[6:])  # Remove 'data: ' prefix
                        except json.JSONDecodeError:
                            continue

                        # Save session_id for future requests if present
                        if data.get("session_id"):
                            self.session_id = data["session_id"]

                        # Only emit chunks that carry text content
                        if data.get("text"):
                            if raw:
                                yield {"text": data["text"]}
                            else:
                                yield Response(data["text"])

                        # Server signals the end of the stream
                        if data.get("finished"):
                            break

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}")

        def for_non_stream():
            # BUG FIX: the previous implementation yielded raw chunks from this
            # function, which made it a generator, so search(stream=False)
            # handed back an unstarted generator instead of a complete
            # response.  It now always drains the stream and returns one value.
            full_response = ""
            for chunk in for_stream():
                full_response += chunk["text"] if raw else str(chunk)

            formatted_response = self.format_response(full_response)
            self.last_response = Response(formatted_response)
            if raw:
                return {"text": formatted_response}
            return self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_response(text: str) -> str:
        """Format the response text for better readability.

        Args:
            text (str): The raw response text

        Returns:
            str: Formatted text
        """
        # Clean up markdown formatting
        cleaned_text = text.replace('**', '')

        # Collapse runs of blank lines into a single blank line
        cleaned_text = re.sub(r'\n\s*\n', '\n\n', cleaned_text)

        # Remove any leading/trailing whitespace
        cleaned_text = cleaned_text.strip()

        return cleaned_text
238
+
239
+
240
if __name__ == "__main__":
    from rich import print

    searcher = Monica()
    query = input(">>> ")
    for piece in searcher.search(query, stream=True, raw=False):
        print(piece, end="", flush=True)
@@ -0,0 +1,320 @@
1
+ import requests
2
+ import json
3
+ import re
4
+ import uuid
5
+ import time
6
+ from typing import Dict, Optional, Generator, Union, Any
7
+
8
+ from webscout.AIbase import AISearch
9
+ from webscout import exceptions
10
+ from webscout import LitAgent
11
+
12
+
13
class Response:
    """Text container for SCIRA API output.

    Instances behave like their wrapped string when printed or converted
    with str(), which keeps streaming callers free of unwrapping code.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> resp = Response("Hello, world!")
        >>> print(resp)
        Hello, world!
        >>> str(resp)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
37
+
38
+
39
class Scira(AISearch):
    """A class to interact with the SCIRA AI search API.

    SCIRA provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses.

    Basic Usage:
        >>> from webscout import Scira
        >>> ai = Scira()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 60.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        model (str, optional): Model to use for the search. Defaults to "scira-default".
        deepsearch (bool, optional): Use the "extreme" search group when True,
            the faster "web" group when False. Defaults to False.
    """

    AVAILABLE_MODELS = {
        "scira-default": "Grok3",
        "scira-grok-3-mini": "Grok3-mini",  # thinking model
        "scira-vision": "Grok2-Vision",  # vision model
        "scira-claude": "Sonnet-3.7",
        "scira-optimus": "optimus",
    }

    def __init__(
        self,
        timeout: int = 60,
        proxies: Optional[dict] = None,
        model: str = "scira-default",
        deepsearch: bool = False
    ):
        """Initialize the SCIRA API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 60.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            model (str, optional): Model to use for the search. Defaults to "scira-default" (Grok3).
            deepsearch (bool, optional): Whether to use deep search mode. If True, uses
                "extreme" group for more comprehensive results. If False, uses "web"
                group for faster results. Defaults to False.

        Raises:
            ValueError: If *model* is not one of AVAILABLE_MODELS.

        Example:
            >>> ai = Scira(timeout=120)  # Longer timeout
            >>> ai = Scira(proxies={'http': 'http://proxy.com:8080'})  # With proxy
            >>> ai = Scira(model="scira-claude")  # Use Claude model
            >>> ai = Scira(deepsearch=True)  # Use deep search mode
        """
        # Validate model
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}"
            )

        self.session = requests.Session()
        self.api_endpoint = "https://scira.ai/api/search"
        self.timeout = timeout
        self.proxies = proxies
        self.model = model

        # Set group based on deepsearch parameter
        self.group = "extreme" if deepsearch else "web"
        self.last_response = {}

        # Set headers
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "*/*",
            "User-Agent": LitAgent().random()
        }

        self.session.headers.update(self.headers)

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Dict[str, str], Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the SCIRA API and get AI-generated responses.

        This method sends a search query to SCIRA and returns the AI-generated response.
        It supports both streaming and non-streaming modes, as well as raw response format.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.

        Returns:
            Union[Response, Dict[str, str], Generator]:
                - If stream=False: Returns the complete response
                - If stream=True: Yields response chunks as they arrive

        Raises:
            exceptions.APIConnectionError: If there's an issue connecting to the API
            exceptions.APIResponseError: If the API returns an error response
        """
        # Create a unique message ID and a per-request anonymous user id
        message_id = str(uuid.uuid4()).replace("-", "")[:16]
        self.user_id = str(uuid.uuid4()).replace("-", "")[:16]
        # Prepare the payload
        payload = {
            "id": message_id,
            "messages": [
                {
                    "role": "user",
                    "content": prompt,
                    "parts": [
                        {
                            "type": "text",
                            "text": prompt
                        }
                    ]
                }
            ],
            "model": self.model,
            "group": self.group,
            "user_id": self.user_id,
            "timezone": "Asia/Calcutta"
        }

        try:
            # Send the request
            response = self.session.post(
                self.api_endpoint,
                headers=self.headers,
                data=json.dumps(payload),
                stream=True,
                timeout=self.timeout,
                proxies=self.proxies
            )

            # Check for successful response
            if response.status_code != 200:
                raise exceptions.APIResponseError(
                    f"API returned error status: {response.status_code}"
                )

            # Store the last response
            self.last_response = {"status_code": response.status_code}

            # Handle streaming response
            if stream:
                return self._handle_streaming_response(response, raw)

            # Handle non-streaming response
            return self._handle_non_streaming_response(response, raw)

        except requests.RequestException as e:
            raise exceptions.APIConnectionError(f"Error connecting to API: {str(e)}")

    @staticmethod
    def _extract_content(decoded_line: str) -> Optional[str]:
        """Extract the text payload from one '0:'-prefixed stream line.

        The API emits content lines shaped like ``0:"some text"`` where the
        part after ``0:`` is a JSON string literal.

        Args:
            decoded_line (str): One decoded line from the response stream.

        Returns:
            Optional[str]: The unescaped text, or None if the line carries
            no content (different prefix, e.g. metadata lines).
        """
        if not decoded_line.startswith("0:"):
            return None
        content = decoded_line[2:]
        try:
            # BUG FIX: the payload is a JSON string literal, so parse it with
            # json.loads to correctly handle ALL escape sequences
            # (\n, \", \\, \uXXXX) — the old code only unescaped \n.
            parsed = json.loads(content)
            if isinstance(parsed, str):
                return parsed
        except json.JSONDecodeError:
            pass
        # Fallback for payloads that are not valid JSON strings: keep the
        # legacy behavior (strip surrounding quotes, unescape newlines).
        content = re.sub(r'^"(.*)"$', r'\1', content)
        return content.replace('\\n', '\n')

    def _handle_streaming_response(
        self,
        response: requests.Response,
        raw: bool
    ) -> Generator[Union[Dict[str, str], Response], None, None]:
        """Handle streaming response from the API.

        Args:
            response (requests.Response): The streaming response from the API
            raw (bool): Whether to return raw response dictionaries

        Yields:
            Union[Dict[str, str], Response]: Response chunks as they arrive
        """
        for line in response.iter_lines():
            if not line:
                continue
            try:
                decoded_line = line.decode("utf-8")
            except UnicodeDecodeError:
                # Skip lines that can't be decoded
                continue

            content = self._extract_content(decoded_line)
            if content is None:
                continue
            if raw:
                yield {"text": content}
            else:
                yield Response(content)

    def _handle_non_streaming_response(
        self,
        response: requests.Response,
        raw: bool
    ) -> Union[Dict[str, str], Response]:
        """Handle non-streaming response from the API.

        Args:
            response (requests.Response): The response from the API
            raw (bool): Whether to return raw response dictionary

        Returns:
            Union[Dict[str, str], Response]: Complete response
        """
        full_text = ""

        for line in response.iter_lines():
            if not line:
                continue
            try:
                decoded_line = line.decode("utf-8")
            except UnicodeDecodeError:
                # Skip lines that can't be decoded
                continue

            content = self._extract_content(decoded_line)
            if content is not None:
                full_text += content

        if raw:
            return {"text": full_text}
        return Response(full_text)

    @staticmethod
    def clean_content(text: str) -> str:
        """Clean the response content by removing unnecessary formatting.

        Args:
            text (str): The text to clean

        Returns:
            str: The cleaned text
        """
        # Collapse all whitespace runs into single spaces
        cleaned_text = re.sub(r'\s+', ' ', text)
        # Remove any leading/trailing whitespace
        cleaned_text = cleaned_text.strip()

        return cleaned_text
312
+
313
+
314
if __name__ == "__main__":
    from rich import print

    client = Scira()
    prompt_text = input(">>> ")
    for fragment in client.search(prompt_text, stream=True, raw=False):
        print(fragment, end="", flush=True)