webscout-7.9-py3-none-any.whl → webscout-8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/scira_search.py
@@ -0,0 +1,320 @@
+ import requests
+ import json
+ import re
+ import uuid
+ import time
+ from typing import Dict, Optional, Generator, Union, Any
+
+ from webscout.AIbase import AISearch
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class Response:
+     """A wrapper class for SCIRA API responses.
+
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+
+     Attributes:
+         text (str): The text content of the response
+
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+
+     def __str__(self):
+         return self.text
+
+     def __repr__(self):
+         return self.text
+
+
+ class Scira(AISearch):
+     """A class to interact with the SCIRA AI search API.
+
+     SCIRA provides a powerful search interface that returns AI-generated responses
+     based on web content. It supports both streaming and non-streaming responses.
+
+     Basic Usage:
+         >>> from webscout import Scira
+         >>> ai = Scira()
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ... print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ... print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+
+     Args:
+         timeout (int, optional): Request timeout in seconds. Defaults to 60.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+         model (str, optional): Model to use for the search. Defaults to "scira-default".
+         group (str, optional): Group parameter. Defaults to "web".
+     """
+
+     AVAILABLE_MODELS = {
+         "scira-default": "Grok3",
+         "scira-grok-3-mini": "Grok3-mini", # thinking model
+         "scira-vision": "Grok2-Vision", # vision model
+         "scira-claude": "Sonnet-3.7",
+         "scira-optimus": "optimus",
+     }
+
+     def __init__(
+         self,
+         timeout: int = 60,
+         proxies: Optional[dict] = None,
+         model: str = "scira-default",
+         deepsearch: bool = False
+     ):
+         """Initialize the SCIRA API client.
+
+         Args:
+             timeout (int, optional): Request timeout in seconds. Defaults to 60.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+             model (str, optional): Model to use for the search. Defaults to "scira-default" (Grok3).
+             deepsearch (bool, optional): Whether to use deep search mode. If True, uses "extreme" group for more comprehensive results. If False, uses "web" group for faster results. Defaults to False.
+
+         Example:
+             >>> ai = Scira(timeout=120) # Longer timeout
+             >>> ai = Scira(proxies={'http': 'http://proxy.com:8080'}) # With proxy
+             >>> ai = Scira(model="scira-claude") # Use Claude model
+             >>> ai = Scira(deepsearch=True) # Use deep search mode
+         """
+         # Validate model
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}"
+             )
+
+         self.session = requests.Session()
+         self.api_endpoint = "https://scira.ai/api/search"
+         self.timeout = timeout
+         self.proxies = proxies
+         self.model = model
+
+         # Set group based on deepsearch parameter
+         self.group = "extreme" if deepsearch else "web"
+         self.last_response = {}
+
+         # Set headers
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "*/*",
+             "User-Agent": LitAgent().random()
+         }
+
+         self.session.headers.update(self.headers)
+
+     def search(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+         """Search using the SCIRA API and get AI-generated responses.
+
+         This method sends a search query to SCIRA and returns the AI-generated response.
+         It supports both streaming and non-streaming modes, as well as raw response format.
+
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+
+         Returns:
+             Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+                 - If stream=False: Returns complete response
+                 - If stream=True: Yields response chunks as they arrive
+
+         Raises:
+             exceptions.APIConnectionError: If there's an issue connecting to the API
+             exceptions.APIResponseError: If the API returns an error response
+
+         Example:
+             >>> ai = Scira()
+             >>> # Non-streaming example
+             >>> response = ai.search("What is Python?")
+             >>> print(response)
+             Python is a high-level programming language...
+
+             >>> # Streaming example
+             >>> for chunk in ai.search("Tell me about AI", stream=True):
+             ... print(chunk, end="", flush=True)
+             Artificial Intelligence is...
+         """
+         # Create a unique message ID
+         message_id = str(uuid.uuid4()).replace("-", "")[:16]
+         self.user_id = str(uuid.uuid4()).replace("-", "")[:16]
+         # Prepare the payload
+         payload = {
+             "id": message_id,
+             "messages": [
+                 {
+                     "role": "user",
+                     "content": prompt,
+                     "parts": [
+                         {
+                             "type": "text",
+                             "text": prompt
+                         }
+                     ]
+                 }
+             ],
+             "model": self.model,
+             "group": self.group,
+             "user_id": self.user_id,
+             "timezone": "Asia/Calcutta"
+         }
+
+         try:
+             # Send the request
+             response = self.session.post(
+                 self.api_endpoint,
+                 headers=self.headers,
+                 data=json.dumps(payload),
+                 stream=True,
+                 timeout=self.timeout,
+                 proxies=self.proxies
+             )
+
+             # Check for successful response
+             if response.status_code != 200:
+                 raise exceptions.APIResponseError(
+                     f"API returned error status: {response.status_code}"
+                 )
+
+             # Store the last response
+             self.last_response = {"status_code": response.status_code}
+
+             # Handle streaming response
+             if stream:
+                 return self._handle_streaming_response(response, raw)
+
+             # Handle non-streaming response
+             return self._handle_non_streaming_response(response, raw)
+
+         except requests.RequestException as e:
+             raise exceptions.APIConnectionError(f"Error connecting to API: {str(e)}")
+
+     def _handle_streaming_response(
+         self,
+         response: requests.Response,
+         raw: bool
+     ) -> Generator[Union[Dict[str, str], Response], None, None]:
+         """Handle streaming response from the API.
+
+         Args:
+             response (requests.Response): The streaming response from the API
+             raw (bool): Whether to return raw response dictionaries
+
+         Yields:
+             Union[Dict[str, str], Response]: Response chunks as they arrive
+         """
+         for line in response.iter_lines():
+             if line:
+                 try:
+                     # Decode the line
+                     decoded_line = line.decode("utf-8")
+
+                     # Check if this is a line starting with "0:" (content)
+                     if re.match(r'^0:', decoded_line):
+                         # Extract the content after "0:"
+                         content = re.sub(r'^0:', '', decoded_line)
+                         # Remove surrounding quotes if present
+                         content = re.sub(r'^"(.*)"$', r'\1', content)
+                         # Replace escaped newlines with actual newlines
+                         content = content.replace('\\n', '\n')
+
+                         if raw:
+                             yield {"text": content}
+                         else:
+                             yield Response(content)
+                 except Exception:
+                     # Skip lines that can't be processed
+                     pass
+
+     def _handle_non_streaming_response(
+         self,
+         response: requests.Response,
+         raw: bool
+     ) -> Union[Dict[str, str], Response]:
+         """Handle non-streaming response from the API.
+
+         Args:
+             response (requests.Response): The response from the API
+             raw (bool): Whether to return raw response dictionary
+
+         Returns:
+             Union[Dict[str, str], Response]: Complete response
+         """
+         full_text = ""
+
+         for line in response.iter_lines():
+             if line:
+                 try:
+                     # Decode the line
+                     decoded_line = line.decode("utf-8")
+
+                     # Check if this is a line starting with "0:" (content)
+                     if re.match(r'^0:', decoded_line):
+                         # Extract the content after "0:"
+                         content = re.sub(r'^0:', '', decoded_line)
+                         # Remove surrounding quotes if present
+                         content = re.sub(r'^"(.*)"$', r'\1', content)
+                         # Replace escaped newlines with actual newlines
+                         content = content.replace('\\n', '\n')
+                         full_text += content
+                 except Exception:
+                     # Skip lines that can't be processed
+                     pass
+
+         if raw:
+             return {"text": full_text}
+         else:
+             return Response(full_text)
+
+     @staticmethod
+     def clean_content(text: str) -> str:
+         """Clean the response content by removing unnecessary formatting.
+
+         Args:
+             text (str): The text to clean
+
+         Returns:
+             str: The cleaned text
+         """
+         # Remove any extra whitespace
+         cleaned_text = re.sub(r'\s+', ' ', text)
+         # Remove any trailing whitespace
+         cleaned_text = cleaned_text.strip()
+
+         return cleaned_text
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = Scira()
+     user_query = input(">>> ")
+     response = ai.search(user_query, stream=True, raw=False)
+     for chunk in response:
+         print(chunk, end="", flush=True)
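For orientation, here is a minimal usage sketch of the new Scira AI-search provider added above. It follows the class's own docstrings (the `model`, `deepsearch`, and `timeout` constructor options and `search(prompt, stream=..., raw=...)`). The top-level import `from webscout import Scira` is taken from the docstring; whether webscout 8.1 actually re-exports the class at package level is not shown in this hunk, so treat the import path as an assumption.

from webscout import Scira  # import path per the docstring above; assumed re-export

ai = Scira(model="scira-default", deepsearch=False, timeout=60)

# Non-streaming: returns a Response object that prints as plain text
print(ai.search("What is Python?"))

# Streaming: yields chunks as they arrive
for chunk in ai.search("Tell me about AI", stream=True):
    print(chunk, end="", flush=True)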
webscout/Provider/AISEARCH/webpilotai_search.py
@@ -0,0 +1,281 @@
+ import requests
+ import json
+ import re
+ from typing import Dict, Optional, Generator, Union, Any
+
+ from webscout.AIbase import AISearch
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class Response:
+     """A wrapper class for webpilotai API responses.
+
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+
+     Attributes:
+         text (str): The text content of the response
+
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+
+     def __str__(self):
+         return self.text
+
+     def __repr__(self):
+         return self.text
+
+
+ class webpilotai(AISearch):
+     """A class to interact with the webpilotai (WebPilot) AI search API.
+
+     webpilotai provides a web-based comprehensive search response interface that returns AI-generated
+     responses with source references and related questions. It supports both streaming and
+     non-streaming responses.
+
+     Basic Usage:
+         >>> from webscout import webpilotai
+         >>> ai = webpilotai()
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ... print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ... print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+
+     Args:
+         timeout (int, optional): Request timeout in seconds. Defaults to 90.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+     """
+
+     def __init__(
+         self,
+         timeout: int = 90,
+         proxies: Optional[dict] = None,
+     ):
+         """Initialize the webpilotai API client.
+
+         Args:
+             timeout (int, optional): Request timeout in seconds. Defaults to 90.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+         Example:
+             >>> ai = webpilotai(timeout=120) # Longer timeout
+             >>> ai = webpilotai(proxies={'http': 'http://proxy.com:8080'}) # With proxy
+         """
+         self.session = requests.Session()
+         self.api_endpoint = "https://api.webpilotai.com/rupee/v1/search"
+         self.timeout = timeout
+         self.last_response = {}
+
+         # The 'Bearer null' is part of the API's expected headers
+         self.headers = {
+             'Accept': 'application/json, text/plain, */*, text/event-stream',
+             'Content-Type': 'application/json;charset=UTF-8',
+             'Authorization': 'Bearer null',
+             'Origin': 'https://www.webpilot.ai',
+             'Referer': 'https://www.webpilot.ai/',
+             'User-Agent': LitAgent().random(),
+         }
+
+         self.session.headers.update(self.headers)
+         self.proxies = proxies
+
+     def search(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+         """Search using the webpilotai API and get AI-generated responses.
+
+         This method sends a search query to webpilotai and returns the AI-generated response.
+         It supports both streaming and non-streaming modes, as well as raw response format.
+
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+
+         Returns:
+             Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+                 - If stream=False: Returns complete response as Response object
+                 - If stream=True: Yields response chunks as either Dict or Response objects
+
+         Raises:
+             APIConnectionError: If the API request fails
+
+         Examples:
+             Basic search:
+             >>> ai = webpilotai()
+             >>> response = ai.search("What is Python?")
+             >>> print(response)
+             Python is a programming language...
+
+             Streaming response:
+             >>> for chunk in ai.search("Tell me about AI", stream=True):
+             ... print(chunk, end="")
+             Artificial Intelligence...
+
+             Raw response format:
+             >>> for chunk in ai.search("Hello", stream=True, raw=True):
+             ... print(chunk)
+             {'text': 'Hello'}
+             {'text': ' there!'}
+         """
+         payload = {
+             "q": prompt,
+             "threadId": "" # Empty for new search
+         }
+
+         def for_stream():
+             full_response_content = ""
+             current_event_name = None
+             current_data_buffer = []
+
+             try:
+                 with self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     proxies=self.proxies
+                 ) as response:
+                     if not response.ok:
+                         raise exceptions.APIConnectionError(
+                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
+
+                     # Process the stream line by line
+                     for line in response.iter_lines(decode_unicode=True):
+                         if not line: # Empty line indicates end of an event
+                             if current_data_buffer:
+                                 # Process the completed event
+                                 full_data = "\n".join(current_data_buffer)
+                                 if current_event_name == "message":
+                                     try:
+                                         data_payload = json.loads(full_data)
+                                         # Check structure based on the API response
+                                         if data_payload.get('type') == 'data':
+                                             content_chunk = data_payload.get('data', {}).get('content', "")
+                                             if content_chunk:
+                                                 full_response_content += content_chunk
+
+                                                 # Yield the new content chunk
+                                                 if raw:
+                                                     yield {"text": content_chunk}
+                                                 else:
+                                                     yield Response(content_chunk)
+                                     except json.JSONDecodeError:
+                                         pass
+                                     except Exception as e:
+                                         # Handle exceptions gracefully in stream processing
+                                         pass
+
+                             # Reset for the next event
+                             current_event_name = None
+                             current_data_buffer = []
+                             continue
+
+                         # Parse SSE fields
+                         if line.startswith('event:'):
+                             current_event_name = line[len('event:'):].strip()
+                         elif line.startswith('data:'):
+                             data_part = line[len('data:'):]
+                             # Remove leading space if present (common in SSE)
+                             if data_part.startswith(' '):
+                                 data_part = data_part[1:]
+                             current_data_buffer.append(data_part)
+
+                     # Process any remaining data in buffer if stream ended without blank line
+                     if current_data_buffer and current_event_name == "message":
+                         try:
+                             full_data = "\n".join(current_data_buffer)
+                             data_payload = json.loads(full_data)
+                             if data_payload.get('type') == 'data':
+                                 content_chunk = data_payload.get('data', {}).get('content', "")
+                                 if content_chunk and len(content_chunk) > len(full_response_content):
+                                     delta = content_chunk[len(full_response_content):]
+                                     full_response_content += delta
+
+                                     if raw:
+                                         yield {"text": delta}
+                                     else:
+                                         yield Response(delta)
+                         except (json.JSONDecodeError, Exception):
+                             pass
+
+             except requests.exceptions.Timeout:
+                 raise exceptions.APIConnectionError("Request timed out")
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+         def for_non_stream():
+             full_response = ""
+             for chunk in for_stream():
+                 if raw:
+                     yield chunk
+                 else:
+                     full_response += str(chunk)
+
+             if not raw:
+                 # Format the response for better readability
+                 formatted_response = self.format_response(full_response)
+                 self.last_response = Response(formatted_response)
+                 return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     @staticmethod
+     def format_response(text: str) -> str:
+         """Format the response text for better readability.
+
+         Args:
+             text (str): The raw response text
+
+         Returns:
+             str: Formatted text with improved structure
+         """
+         # Clean up formatting
+         # Remove excessive newlines
+         clean_text = re.sub(r'\n{3,}', '\n\n', text)
+
+         # Ensure consistent spacing around sections
+         clean_text = re.sub(r'([.!?])\s*\n\s*([A-Z])', r'\1\n\n\2', clean_text)
+
+         # Clean up any leftover HTML or markdown artifacts
+         clean_text = re.sub(r'<[^>]*>', '', clean_text)
+
+         # Remove trailing whitespace on each line
+         clean_text = '\n'.join(line.rstrip() for line in clean_text.split('\n'))
+
+         return clean_text.strip()
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     ai = webpilotai()
+     response = ai.search(input(">>> "), stream=True, raw=False)
+     for chunk in response:
+         print(chunk, end="", flush=True)
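And a matching sketch for the new webpilotai provider above, again following only what the docstrings in this hunk show (constructor `timeout`/`proxies`, `search(prompt, stream=..., raw=...)`, raw chunks as dicts with a 'text' key). As with Scira, the top-level import `from webscout import webpilotai` comes from the docstring and is an assumption about the package's exports.

from webscout import webpilotai  # import path per the docstring above; assumed re-export

ai = webpilotai(timeout=90)

# Raw streaming mode yields plain dicts with a 'text' key
for chunk in ai.search("What's new in Python?", stream=True, raw=True):
    print(chunk["text"], end="", flush=True)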