webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the advisory details below for more information.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,436 +1,410 @@
1
- import aiohttp
2
- import asyncio
3
- import lxml.html
4
- import re
5
- import urllib.parse
6
- from markdownify import markdownify as md
7
- from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
8
-
9
- from webscout.AIbase import AISearch
10
- from webscout import exceptions
11
- from webscout.scout import Scout
12
-
13
-
14
- class Response:
15
- """A wrapper class for IAsk API responses.
16
-
17
- This class automatically converts response objects to their text representation
18
- when printed or converted to string.
19
-
20
- Attributes:
21
- text (str): The text content of the response
22
-
23
- Example:
24
- >>> response = Response("Hello, world!")
25
- >>> print(response)
26
- Hello, world!
27
- >>> str(response)
28
- 'Hello, world!'
29
- """
30
- def __init__(self, text: str):
31
- self.text = text
32
-
33
- def __str__(self):
34
- return self.text
35
-
36
- def __repr__(self):
37
- return self.text
38
-
39
-
40
- def cache_find(diff: Union[dict, list]) -> Optional[str]:
41
- """Find HTML content in a nested dictionary or list structure.
42
-
43
- Args:
44
- diff (Union[dict, list]): The nested structure to search
45
-
46
- Returns:
47
- Optional[str]: The found HTML content, or None if not found
48
- """
49
- values = diff if isinstance(diff, list) else diff.values()
50
- for value in values:
51
- if isinstance(value, (list, dict)):
52
- cache = cache_find(value)
53
- if cache:
54
- return cache
55
- if isinstance(value, str) and re.search(r"<p>.+?</p>", value):
56
- return md(value).strip()
57
-
58
- return None
59
-
60
-
61
- ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
62
- DetailLevelType = Literal["concise", "detailed", "comprehensive"]
63
-
64
-
65
- class IAsk(AISearch):
66
- """A class to interact with the IAsk AI search API.
67
-
68
- IAsk provides a powerful search interface that returns AI-generated responses
69
- based on web content. It supports both streaming and non-streaming responses,
70
- as well as different search modes and detail levels.
71
-
72
- Basic Usage:
73
- >>> from webscout import IAsk
74
- >>> ai = IAsk()
75
- >>> # Non-streaming example
76
- >>> response = ai.search("What is Python?")
77
- >>> print(response)
78
- Python is a high-level programming language...
79
-
80
- >>> # Streaming example
81
- >>> for chunk in ai.search("Tell me about AI", stream=True):
82
- ... print(chunk, end="", flush=True)
83
- Artificial Intelligence is...
84
-
85
- >>> # With specific mode and detail level
86
- >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
87
- >>> print(response)
88
- Climate change refers to...
89
-
90
- Args:
91
- timeout (int, optional): Request timeout in seconds. Defaults to 30.
92
- proxies (dict, optional): Proxy configuration for requests. Defaults to None.
93
- mode (ModeType, optional): Default search mode. Defaults to "question".
94
- detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
95
- """
96
-
97
- def __init__(
98
- self,
99
- timeout: int = 30,
100
- proxies: Optional[dict] = None,
101
- mode: ModeType = "question",
102
- detail_level: Optional[DetailLevelType] = None,
103
- ):
104
- """Initialize the IAsk API client.
105
-
106
- Args:
107
- timeout (int, optional): Request timeout in seconds. Defaults to 30.
108
- proxies (dict, optional): Proxy configuration for requests. Defaults to None.
109
- mode (ModeType, optional): Default search mode. Defaults to "question".
110
- detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
111
- """
112
- self.timeout = timeout
113
- self.proxies = proxies or {}
114
- self.default_mode = mode
115
- self.default_detail_level = detail_level
116
- self.api_endpoint = "https://iask.ai/"
117
- self.last_response = {}
118
-
119
- def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
120
- """Create a properly formatted URL with mode and detail level parameters.
121
-
122
- Args:
123
- query (str): The search query.
124
- mode (ModeType, optional): Search mode. Defaults to "question".
125
- detail_level (DetailLevelType, optional): Detail level. Defaults to None.
126
-
127
- Returns:
128
- str: Formatted URL with query parameters.
129
-
130
- Example:
131
- >>> ai = IAsk()
132
- >>> url = ai.create_url("Climate change", mode="academic", detail_level="detailed")
133
- >>> print(url)
134
- https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed
135
- """
136
- # Create a dictionary of parameters with flattened structure
137
- params = {
138
- "mode": mode,
139
- "q": query
140
- }
141
-
142
- # Add detail_level if provided using the flattened format
143
- if detail_level:
144
- params["options[detail_level]"] = detail_level
145
-
146
- # Encode the parameters and build the URL
147
- query_string = urllib.parse.urlencode(params)
148
- url = f"{self.api_endpoint}?{query_string}"
149
-
150
- return url
151
-
152
- def format_html(self, html_content: str) -> str:
153
- """Format HTML content into a more readable text format.
154
-
155
- Args:
156
- html_content (str): The HTML content to format.
157
-
158
- Returns:
159
- str: Formatted text.
160
- """
161
- scout = Scout(html_content, features='html.parser')
162
- output_lines = []
163
-
164
- for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
165
- if child.name in ["h1", "h2", "h3"]:
166
- output_lines.append(f"\n**{child.get_text().strip()}**\n")
167
- elif child.name == "p":
168
- text = child.get_text().strip()
169
- text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
170
- # Remove footnote markers
171
- text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
172
- output_lines.append(text + "\n")
173
- elif child.name in ["ol", "ul"]:
174
- for li in child.find_all("li"):
175
- output_lines.append("- " + li.get_text().strip() + "\n")
176
- elif child.name == "div" and "footnotes" in child.get("class", []):
177
- output_lines.append("\n**Authoritative Sources**\n")
178
- for li in child.find_all("li"):
179
- link = li.find("a")
180
- if link:
181
- output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")
182
-
183
- return "".join(output_lines)
184
-
185
- def search(
186
- self,
187
- prompt: str,
188
- stream: bool = False,
189
- raw: bool = False,
190
- mode: Optional[ModeType] = None,
191
- detail_level: Optional[DetailLevelType] = None,
192
- ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
193
- """Search using the IAsk API and get AI-generated responses.
194
-
195
- This method sends a search query to IAsk and returns the AI-generated response.
196
- It supports both streaming and non-streaming modes, as well as raw response format.
197
-
198
- Args:
199
- prompt (str): The search query or prompt to send to the API.
200
- stream (bool, optional): If True, yields response chunks as they arrive.
201
- If False, returns complete response. Defaults to False.
202
- raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
203
- If False, returns Response objects that convert to text automatically.
204
- Defaults to False.
205
- mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
206
- detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).
207
-
208
- Returns:
209
- Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
210
- - If stream=False: Returns complete response as Response object
211
- - If stream=True: Yields response chunks as either Dict or Response objects
212
-
213
- Raises:
214
- APIConnectionError: If the API request fails
215
-
216
- Examples:
217
- Basic search:
218
- >>> ai = IAsk()
219
- >>> response = ai.search("What is Python?")
220
- >>> print(response)
221
- Python is a programming language...
222
-
223
- Streaming response:
224
- >>> for chunk in ai.search("Tell me about AI", stream=True):
225
- ... print(chunk, end="")
226
- Artificial Intelligence...
227
-
228
- Raw response format:
229
- >>> for chunk in ai.search("Hello", stream=True, raw=True):
230
- ... print(chunk)
231
- {'text': 'Hello'}
232
- {'text': ' there!'}
233
-
234
- With specific mode and detail level:
235
- >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
236
- >>> print(response)
237
- Climate change refers to...
238
- """
239
- # Use provided parameters or fall back to instance defaults
240
- search_mode = mode or self.default_mode
241
- search_detail_level = detail_level or self.default_detail_level
242
-
243
- # For non-streaming, run the async search and return the complete response
244
- if not stream:
245
- # Create a new event loop for this request
246
- loop = asyncio.new_event_loop()
247
- asyncio.set_event_loop(loop)
248
- try:
249
- result = loop.run_until_complete(
250
- self._async_search(prompt, False, raw, search_mode, search_detail_level)
251
- )
252
- return result
253
- finally:
254
- loop.close()
255
-
256
- # For streaming, use a simpler approach with a single event loop
257
- # that stays open until the generator is exhausted
258
- buffer = ""
259
-
260
- def sync_generator():
261
- nonlocal buffer
262
- # Create a new event loop for this generator
263
- loop = asyncio.new_event_loop()
264
- asyncio.set_event_loop(loop)
265
-
266
- try:
267
- # Get the async generator
268
- async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
269
- async_gen = loop.run_until_complete(async_gen_coro)
270
-
271
- # Process chunks one by one
272
- while True:
273
- try:
274
- # Get the next chunk
275
- chunk_coro = async_gen.__anext__()
276
- chunk = loop.run_until_complete(chunk_coro)
277
-
278
- # Update buffer and yield the chunk
279
- if isinstance(chunk, dict) and 'text' in chunk:
280
- buffer += chunk['text']
281
- elif isinstance(chunk, Response):
282
- buffer += chunk.text
283
- else:
284
- buffer += str(chunk)
285
-
286
- yield chunk
287
- except StopAsyncIteration:
288
- break
289
- except Exception as e:
290
- print(f"Error in generator: {e}")
291
- break
292
- finally:
293
- # Store the final response and close the loop
294
- self.last_response = {"text": buffer}
295
- loop.close()
296
-
297
- return sync_generator()
298
-
299
- async def _async_search(
300
- self,
301
- prompt: str,
302
- stream: bool = False,
303
- raw: bool = False,
304
- mode: ModeType = "question",
305
- detail_level: Optional[DetailLevelType] = None,
306
- ) -> Union[Response, AsyncIterator[Union[Dict[str, str], Response]]]:
307
- """Internal async implementation of the search method."""
308
-
309
- async def stream_generator() -> AsyncIterator[str]:
310
- async with aiohttp.ClientSession() as session:
311
- # Prepare parameters
312
- params = {"mode": mode, "q": prompt}
313
- if detail_level:
314
- params["options[detail_level]"] = detail_level
315
-
316
- try:
317
- async with session.get(
318
- self.api_endpoint,
319
- params=params,
320
- proxy=self.proxies.get('http') if self.proxies else None,
321
- timeout=self.timeout
322
- ) as response:
323
- if not response.ok:
324
- raise exceptions.APIConnectionError(
325
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {await response.text()}"
326
- )
327
-
328
- etree = lxml.html.fromstring(await response.text())
329
- phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
330
- csrf_token = (
331
- etree.xpath('//*[@name="csrf-token"]').pop().get("content")
332
- )
333
-
334
- async with session.ws_connect(
335
- f"{self.api_endpoint}live/websocket",
336
- params={
337
- "_csrf_token": csrf_token,
338
- "vsn": "2.0.0",
339
- },
340
- proxy=self.proxies.get('http') if self.proxies else None,
341
- timeout=self.timeout
342
- ) as wsResponse:
343
- await wsResponse.send_json(
344
- [
345
- None,
346
- None,
347
- f"lv:{phx_node.get('id')}",
348
- "phx_join",
349
- {
350
- "params": {"_csrf_token": csrf_token},
351
- "url": str(response.url),
352
- "session": phx_node.get("data-phx-session"),
353
- },
354
- ]
355
- )
356
- while True:
357
- json_data = await wsResponse.receive_json()
358
- if not json_data:
359
- break
360
- diff: dict = json_data[4]
361
- try:
362
- chunk: str = diff["e"][0][1]["data"]
363
- # Check if the chunk contains HTML content
364
- if re.search(r"<[^>]+>", chunk):
365
- formatted_chunk = self.format_html(chunk)
366
- yield formatted_chunk
367
- else:
368
- yield chunk.replace("<br/>", "\n")
369
- except:
370
- cache = cache_find(diff)
371
- if cache:
372
- if diff.get("response", None):
373
- # Format the cache content if it contains HTML
374
- if re.search(r"<[^>]+>", cache):
375
- formatted_cache = self.format_html(cache)
376
- yield formatted_cache
377
- else:
378
- yield cache
379
- break
380
- except Exception as e:
381
- raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")
382
-
383
- # For non-streaming, collect all chunks and return a single response
384
- if not stream:
385
- buffer = ""
386
- async for chunk in stream_generator():
387
- buffer += chunk
388
- self.last_response = {"text": buffer}
389
- return Response(buffer) if not raw else {"text": buffer}
390
-
391
- # For streaming, create an async generator that yields chunks
392
- async def process_stream():
393
- buffer = ""
394
- async for chunk in stream_generator():
395
- buffer += chunk
396
- if raw:
397
- yield {"text": chunk}
398
- else:
399
- yield Response(chunk)
400
- self.last_response = {"text": buffer}
401
-
402
- # Return the async generator
403
- return process_stream()
404
-
405
-
406
- if __name__ == "__main__":
407
- from rich import print
408
-
409
- ai = IAsk()
410
-
411
- # Example 1: Simple search with default mode
412
- print("\n[bold cyan]Example 1: Simple search with default mode[/bold cyan]")
413
- response = ai.search("What is Python?", stream=True)
414
- for chunk in response:
415
- print(chunk, end="", flush=True)
416
- print("\n\n[bold green]Response complete.[/bold green]\n")
417
-
418
- # Example 2: Search with academic mode
419
- print("\n[bold cyan]Example 2: Search with academic mode[/bold cyan]")
420
- response = ai.search("Quantum computing applications", mode="academic", stream=True)
421
- for chunk in response:
422
- print(chunk, end="", flush=True)
423
- print("\n\n[bold green]Response complete.[/bold green]\n")
424
-
425
- # Example 3: Search with advanced mode and detailed level
426
- print("\n[bold cyan]Example 3: Search with advanced mode and detailed level[/bold cyan]")
427
- response = ai.search("Climate change solutions", mode="advanced", detail_level="detailed", stream=True)
428
- for chunk in response:
429
- print(chunk, end="", flush=True)
430
- print("\n\n[bold green]Response complete.[/bold green]\n")
431
-
432
- # Example 4: Demonstrating the create_url method
433
- print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
434
- url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
435
- print(f"URL: {url}")
436
- print("This URL can be used directly in a browser or with other HTTP clients.")
1
+ import aiohttp
2
+ import asyncio
3
+ import lxml.html
4
+ import re
5
+ import urllib.parse
6
+ from markdownify import markdownify as md
7
+ from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
8
+
9
+ from webscout.AIbase import AISearch, SearchResponse
10
+ from webscout import exceptions
11
+ from webscout.scout import Scout
12
+
13
+
14
+ def cache_find(diff: Union[dict, list]) -> Optional[str]:
15
+ """Find HTML content in a nested dictionary or list structure.
16
+
17
+ Args:
18
+ diff (Union[dict, list]): The nested structure to search
19
+
20
+ Returns:
21
+ Optional[str]: The found HTML content, or None if not found
22
+ """
23
+ values = diff if isinstance(diff, list) else diff.values()
24
+ for value in values:
25
+ if isinstance(value, (list, dict)):
26
+ cache = cache_find(value)
27
+ if cache:
28
+ return cache
29
+ if isinstance(value, str) and re.search(r"<p>.+?</p>", value):
30
+ return md(value).strip()
31
+
32
+ return None
33
+
34
+
35
+ ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
36
+ DetailLevelType = Literal["concise", "detailed", "comprehensive"]
37
+
38
+
39
+ class IAsk(AISearch):
40
+ """A class to interact with the IAsk AI search API.
41
+
42
+ IAsk provides a powerful search interface that returns AI-generated responses
43
+ based on web content. It supports both streaming and non-streaming responses,
44
+ as well as different search modes and detail levels.
45
+
46
+ Basic Usage:
47
+ >>> from webscout import IAsk
48
+ >>> ai = IAsk()
49
+ >>> # Non-streaming example
50
+ >>> response = ai.search("What is Python?")
51
+ >>> print(response)
52
+ Python is a high-level programming language...
53
+
54
+ >>> # Streaming example
55
+ >>> for chunk in ai.search("Tell me about AI", stream=True):
56
+ ... print(chunk, end="", flush=True)
57
+ Artificial Intelligence is...
58
+
59
+ >>> # With specific mode and detail level
60
+ >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
61
+ >>> print(response)
62
+ Climate change refers to...
63
+
64
+ Args:
65
+ timeout (int, optional): Request timeout in seconds. Defaults to 30.
66
+ proxies (dict, optional): Proxy configuration for requests. Defaults to None.
67
+ mode (ModeType, optional): Default search mode. Defaults to "question".
68
+ detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
69
+ """
70
+
71
+ def __init__(
72
+ self,
73
+ timeout: int = 30,
74
+ proxies: Optional[dict] = None,
75
+ mode: ModeType = "question",
76
+ detail_level: Optional[DetailLevelType] = None,
77
+ ):
78
+ """Initialize the IAsk API client.
79
+
80
+ Args:
81
+ timeout (int, optional): Request timeout in seconds. Defaults to 30.
82
+ proxies (dict, optional): Proxy configuration for requests. Defaults to None.
83
+ mode (ModeType, optional): Default search mode. Defaults to "question".
84
+ detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
85
+ """
86
+ self.timeout = timeout
87
+ self.proxies = proxies or {}
88
+ self.default_mode = mode
89
+ self.default_detail_level = detail_level
90
+ self.api_endpoint = "https://iask.ai/"
91
+ self.last_response = {}
92
+
93
+ def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
94
+ """Create a properly formatted URL with mode and detail level parameters.
95
+
96
+ Args:
97
+ query (str): The search query.
98
+ mode (ModeType, optional): Search mode. Defaults to "question".
99
+ detail_level (DetailLevelType, optional): Detail level. Defaults to None.
100
+
101
+ Returns:
102
+ str: Formatted URL with query parameters.
103
+
104
+ Example:
105
+ >>> ai = IAsk()
106
+ >>> url = ai.create_url("Climate change", mode="academic", detail_level="detailed")
107
+ >>> print(url)
108
+ https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed
109
+ """
110
+ # Create a dictionary of parameters with flattened structure
111
+ params = {
112
+ "mode": mode,
113
+ "q": query
114
+ }
115
+
116
+ # Add detail_level if provided using the flattened format
117
+ if detail_level:
118
+ params["options[detail_level]"] = detail_level
119
+
120
+ # Encode the parameters and build the URL
121
+ query_string = urllib.parse.urlencode(params)
122
+ url = f"{self.api_endpoint}?{query_string}"
123
+
124
+ return url
125
+
126
+ def format_html(self, html_content: str) -> str:
127
+ """Format HTML content into a more readable text format.
128
+
129
+ Args:
130
+ html_content (str): The HTML content to format.
131
+
132
+ Returns:
133
+ str: Formatted text.
134
+ """
135
+ scout = Scout(html_content, features='html.parser')
136
+ output_lines = []
137
+
138
+ for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
139
+ if child.name in ["h1", "h2", "h3"]:
140
+ output_lines.append(f"\n**{child.get_text().strip()}**\n")
141
+ elif child.name == "p":
142
+ text = child.get_text().strip()
143
+ text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
144
+ # Remove footnote markers
145
+ text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
146
+ output_lines.append(text + "\n")
147
+ elif child.name in ["ol", "ul"]:
148
+ for li in child.find_all("li"):
149
+ output_lines.append("- " + li.get_text().strip() + "\n")
150
+ elif child.name == "div" and "footnotes" in child.get("class", []):
151
+ output_lines.append("\n**Authoritative Sources**\n")
152
+ for li in child.find_all("li"):
153
+ link = li.find("a")
154
+ if link:
155
+ output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")
156
+
157
+ return "".join(output_lines)
158
+
159
    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        mode: Optional[ModeType] = None,
        detail_level: Optional[DetailLevelType] = None,
    ) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
        """Search using the IAsk API and get AI-generated responses.

        This method sends a search query to IAsk and returns the AI-generated response.
        It supports both streaming and non-streaming modes, as well as raw response format.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.
            mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
            detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).

        Returns:
            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
                - If stream=False: Returns complete response as Response object
                - If stream=True: Yields response chunks as either Dict or Response objects

        Raises:
            APIConnectionError: If the API request fails

        Examples:
            Basic search:
            >>> ai = IAsk()
            >>> response = ai.search("What is Python?")
            >>> print(response)
            Python is a programming language...

            Streaming response:
            >>> for chunk in ai.search("Tell me about AI", stream=True):
            ...     print(chunk, end="")
            Artificial Intelligence...

            Raw response format:
            >>> for chunk in ai.search("Hello", stream=True, raw=True):
            ...     print(chunk)
            {'text': 'Hello'}
            {'text': ' there!'}

            With specific mode and detail level:
            >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
            >>> print(response)
            Climate change refers to...
        """
        # Use provided parameters or fall back to instance defaults
        search_mode = mode or self.default_mode
        search_detail_level = detail_level or self.default_detail_level

        # For non-streaming, run the async search and return the complete response
        if not stream:
            # Create a new event loop for this request
            # NOTE(review): a fresh loop is created and closed per call, and
            # asyncio.set_event_loop() is not restored afterwards — confirm no
            # caller on the same thread relies on a previously-installed loop.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                result = loop.run_until_complete(
                    self._async_search(prompt, False, raw, search_mode, search_detail_level)
                )
                return result
            finally:
                loop.close()

        # For streaming, use a simpler approach with a single event loop
        # that stays open until the generator is exhausted
        buffer = ""

        def sync_generator():
            # Bridges the async generator produced by _async_search into a
            # plain synchronous generator by driving one __anext__() at a
            # time on a dedicated event loop.
            nonlocal buffer
            # Create a new event loop for this generator
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Get the async generator
                # (_async_search is a coroutine whose *return value* is the
                # async generator, so it must be awaited first)
                async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
                async_gen = loop.run_until_complete(async_gen_coro)

                # Process chunks one by one
                while True:
                    try:
                        # Get the next chunk
                        chunk_coro = async_gen.__anext__()
                        chunk = loop.run_until_complete(chunk_coro)

                        # Update buffer and yield the chunk
                        # (a chunk may be a raw {'text': ...} dict, a
                        # SearchResponse, or any other printable object)
                        if isinstance(chunk, dict) and 'text' in chunk:
                            buffer += chunk['text']
                        elif isinstance(chunk, SearchResponse):
                            buffer += chunk.text
                        else:
                            buffer += str(chunk)

                        yield chunk
                    except StopAsyncIteration:
                        # Async generator exhausted — normal end of stream.
                        break
                    except Exception as e:
                        # NOTE(review): errors are printed and the stream
                        # simply ends, so callers cannot distinguish a
                        # failure from normal completion — confirm this
                        # best-effort behavior is intended.
                        print(f"Error in generator: {e}")
                        break
            finally:
                # Store the final response and close the loop
                self.last_response = {"text": buffer}
                loop.close()

        return sync_generator()
272
+
273
+ async def _async_search(
274
+ self,
275
+ prompt: str,
276
+ stream: bool = False,
277
+ raw: bool = False,
278
+ mode: ModeType = "question",
279
+ detail_level: Optional[DetailLevelType] = None,
280
+ ) -> Union[SearchResponse, AsyncIterator[Union[Dict[str, str], SearchResponse]]]:
281
+ """Internal async implementation of the search method."""
282
+
283
+ async def stream_generator() -> AsyncIterator[str]:
284
+ async with aiohttp.ClientSession() as session:
285
+ # Prepare parameters
286
+ params = {"mode": mode, "q": prompt}
287
+ if detail_level:
288
+ params["options[detail_level]"] = detail_level
289
+
290
+ try:
291
+ async with session.get(
292
+ self.api_endpoint,
293
+ params=params,
294
+ proxy=self.proxies.get('http') if self.proxies else None,
295
+ timeout=self.timeout
296
+ ) as response:
297
+ if not response.ok:
298
+ raise exceptions.APIConnectionError(
299
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {await response.text()}"
300
+ )
301
+
302
+ etree = lxml.html.fromstring(await response.text())
303
+ phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
304
+ csrf_token = (
305
+ etree.xpath('//*[@name="csrf-token"]').pop().get("content")
306
+ )
307
+
308
+ async with session.ws_connect(
309
+ f"{self.api_endpoint}live/websocket",
310
+ params={
311
+ "_csrf_token": csrf_token,
312
+ "vsn": "2.0.0",
313
+ },
314
+ proxy=self.proxies.get('http') if self.proxies else None,
315
+ timeout=self.timeout
316
+ ) as wsResponse:
317
+ await wsResponse.send_json(
318
+ [
319
+ None,
320
+ None,
321
+ f"lv:{phx_node.get('id')}",
322
+ "phx_join",
323
+ {
324
+ "params": {"_csrf_token": csrf_token},
325
+ "url": str(response.url),
326
+ "session": phx_node.get("data-phx-session"),
327
+ },
328
+ ]
329
+ )
330
+ while True:
331
+ json_data = await wsResponse.receive_json()
332
+ if not json_data:
333
+ break
334
+ diff: dict = json_data[4]
335
+ try:
336
+ chunk: str = diff["e"][0][1]["data"]
337
+ # Check if the chunk contains HTML content
338
+ if re.search(r"<[^>]+>", chunk):
339
+ formatted_chunk = self.format_html(chunk)
340
+ yield formatted_chunk
341
+ else:
342
+ yield chunk.replace("<br/>", "\n")
343
+ except:
344
+ cache = cache_find(diff)
345
+ if cache:
346
+ if diff.get("response", None):
347
+ # Format the cache content if it contains HTML
348
+ if re.search(r"<[^>]+>", cache):
349
+ formatted_cache = self.format_html(cache)
350
+ yield formatted_cache
351
+ else:
352
+ yield cache
353
+ break
354
+ except Exception as e:
355
+ raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")
356
+
357
+ # For non-streaming, collect all chunks and return a single response
358
+ if not stream:
359
+ buffer = ""
360
+ async for chunk in stream_generator():
361
+ buffer += chunk
362
+ self.last_response = {"text": buffer}
363
+ return SearchResponse(buffer) if not raw else {"text": buffer}
364
+
365
+ # For streaming, create an async generator that yields chunks
366
+ async def process_stream():
367
+ buffer = ""
368
+ async for chunk in stream_generator():
369
+ buffer += chunk
370
+ if raw:
371
+ yield {"text": chunk}
372
+ else:
373
+ yield SearchResponse(chunk)
374
+ self.last_response = {"text": buffer}
375
+
376
+ # Return the async generator
377
+ return process_stream()
378
+
379
+
380
if __name__ == "__main__":
    from rich import print

    ai = IAsk()

    # Streaming demos: (banner, query, extra search keyword arguments).
    demos = [
        ("Example 1: Simple search with default mode",
         "What is Python?", {}),
        ("Example 2: Search with academic mode",
         "Quantum computing applications", {"mode": "academic"}),
        ("Example 3: Search with advanced mode and detailed level",
         "Climate change solutions", {"mode": "advanced", "detail_level": "detailed"}),
    ]
    for banner, query, extra in demos:
        print(f"\n[bold cyan]{banner}[/bold cyan]")
        for chunk in ai.search(query, stream=True, **extra):
            print(chunk, end="", flush=True)
        print("\n\n[bold green]Response complete.[/bold green]\n")

    # Demonstrate the create_url helper (no network request involved).
    print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
    url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
    print(f"URL: {url}")
    print("This URL can be used directly in a browser or with other HTTP clients.")