webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (306)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIbase.py +144 -7
  3. webscout/AIutel.py +249 -131
  4. webscout/Bard.py +579 -206
  5. webscout/DWEBS.py +78 -35
  6. webscout/__init__.py +0 -1
  7. webscout/cli.py +256 -0
  8. webscout/conversation.py +307 -436
  9. webscout/exceptions.py +23 -0
  10. webscout/prompt_manager.py +56 -42
  11. webscout/version.py +1 -1
  12. webscout/webscout_search.py +65 -47
  13. webscout/webscout_search_async.py +81 -126
  14. webscout/yep_search.py +93 -43
  15. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
  16. webscout-8.2.7.dist-info/RECORD +26 -0
  17. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
  18. webscout-8.2.7.dist-info/entry_points.txt +3 -0
  19. webscout-8.2.7.dist-info/top_level.txt +1 -0
  20. inferno/__init__.py +0 -6
  21. inferno/__main__.py +0 -9
  22. inferno/cli.py +0 -6
  23. webscout/Extra/GitToolkit/__init__.py +0 -10
  24. webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
  25. webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
  26. webscout/Extra/GitToolkit/gitapi/user.py +0 -96
  27. webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
  28. webscout/Extra/YTToolkit/YTdownloader.py +0 -957
  29. webscout/Extra/YTToolkit/__init__.py +0 -3
  30. webscout/Extra/YTToolkit/transcriber.py +0 -476
  31. webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
  32. webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
  33. webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
  34. webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
  35. webscout/Extra/YTToolkit/ytapi/https.py +0 -88
  36. webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
  37. webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
  38. webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
  39. webscout/Extra/YTToolkit/ytapi/query.py +0 -40
  40. webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
  41. webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
  42. webscout/Extra/YTToolkit/ytapi/video.py +0 -232
  43. webscout/Extra/__init__.py +0 -7
  44. webscout/Extra/autocoder/__init__.py +0 -9
  45. webscout/Extra/autocoder/autocoder.py +0 -849
  46. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  47. webscout/Extra/gguf.py +0 -682
  48. webscout/Extra/tempmail/__init__.py +0 -28
  49. webscout/Extra/tempmail/async_utils.py +0 -141
  50. webscout/Extra/tempmail/base.py +0 -161
  51. webscout/Extra/tempmail/cli.py +0 -187
  52. webscout/Extra/tempmail/emailnator.py +0 -84
  53. webscout/Extra/tempmail/mail_tm.py +0 -361
  54. webscout/Extra/tempmail/temp_mail_io.py +0 -292
  55. webscout/Extra/weather.py +0 -194
  56. webscout/Extra/weather_ascii.py +0 -76
  57. webscout/LLM.py +0 -442
  58. webscout/Litlogger/__init__.py +0 -67
  59. webscout/Litlogger/core/__init__.py +0 -6
  60. webscout/Litlogger/core/level.py +0 -23
  61. webscout/Litlogger/core/logger.py +0 -165
  62. webscout/Litlogger/handlers/__init__.py +0 -12
  63. webscout/Litlogger/handlers/console.py +0 -33
  64. webscout/Litlogger/handlers/file.py +0 -143
  65. webscout/Litlogger/handlers/network.py +0 -173
  66. webscout/Litlogger/styles/__init__.py +0 -7
  67. webscout/Litlogger/styles/colors.py +0 -249
  68. webscout/Litlogger/styles/formats.py +0 -458
  69. webscout/Litlogger/styles/text.py +0 -87
  70. webscout/Litlogger/utils/__init__.py +0 -6
  71. webscout/Litlogger/utils/detectors.py +0 -153
  72. webscout/Litlogger/utils/formatters.py +0 -200
  73. webscout/Local/__init__.py +0 -12
  74. webscout/Local/__main__.py +0 -9
  75. webscout/Local/api.py +0 -576
  76. webscout/Local/cli.py +0 -516
  77. webscout/Local/config.py +0 -75
  78. webscout/Local/llm.py +0 -287
  79. webscout/Local/model_manager.py +0 -253
  80. webscout/Local/server.py +0 -721
  81. webscout/Local/utils.py +0 -93
  82. webscout/Provider/AI21.py +0 -177
  83. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  84. webscout/Provider/AISEARCH/ISou.py +0 -256
  85. webscout/Provider/AISEARCH/Perplexity.py +0 -359
  86. webscout/Provider/AISEARCH/__init__.py +0 -10
  87. webscout/Provider/AISEARCH/felo_search.py +0 -228
  88. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  89. webscout/Provider/AISEARCH/hika_search.py +0 -194
  90. webscout/Provider/AISEARCH/iask_search.py +0 -436
  91. webscout/Provider/AISEARCH/monica_search.py +0 -246
  92. webscout/Provider/AISEARCH/scira_search.py +0 -324
  93. webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
  94. webscout/Provider/Aitopia.py +0 -292
  95. webscout/Provider/AllenAI.py +0 -413
  96. webscout/Provider/Andi.py +0 -228
  97. webscout/Provider/Blackboxai.py +0 -229
  98. webscout/Provider/C4ai.py +0 -432
  99. webscout/Provider/ChatGPTClone.py +0 -226
  100. webscout/Provider/ChatGPTES.py +0 -237
  101. webscout/Provider/ChatGPTGratis.py +0 -194
  102. webscout/Provider/Chatify.py +0 -175
  103. webscout/Provider/Cloudflare.py +0 -273
  104. webscout/Provider/Cohere.py +0 -208
  105. webscout/Provider/DeepSeek.py +0 -196
  106. webscout/Provider/Deepinfra.py +0 -297
  107. webscout/Provider/ElectronHub.py +0 -709
  108. webscout/Provider/ExaAI.py +0 -261
  109. webscout/Provider/ExaChat.py +0 -342
  110. webscout/Provider/Free2GPT.py +0 -241
  111. webscout/Provider/GPTWeb.py +0 -193
  112. webscout/Provider/Gemini.py +0 -169
  113. webscout/Provider/GithubChat.py +0 -367
  114. webscout/Provider/Glider.py +0 -211
  115. webscout/Provider/Groq.py +0 -670
  116. webscout/Provider/HF_space/__init__.py +0 -0
  117. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  118. webscout/Provider/HeckAI.py +0 -233
  119. webscout/Provider/HuggingFaceChat.py +0 -462
  120. webscout/Provider/Hunyuan.py +0 -272
  121. webscout/Provider/Jadve.py +0 -266
  122. webscout/Provider/Koboldai.py +0 -381
  123. webscout/Provider/LambdaChat.py +0 -392
  124. webscout/Provider/Llama.py +0 -200
  125. webscout/Provider/Llama3.py +0 -204
  126. webscout/Provider/Marcus.py +0 -148
  127. webscout/Provider/Netwrck.py +0 -228
  128. webscout/Provider/OLLAMA.py +0 -396
  129. webscout/Provider/OPENAI/__init__.py +0 -25
  130. webscout/Provider/OPENAI/base.py +0 -46
  131. webscout/Provider/OPENAI/c4ai.py +0 -367
  132. webscout/Provider/OPENAI/chatgpt.py +0 -549
  133. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  134. webscout/Provider/OPENAI/deepinfra.py +0 -272
  135. webscout/Provider/OPENAI/e2b.py +0 -1350
  136. webscout/Provider/OPENAI/exaai.py +0 -404
  137. webscout/Provider/OPENAI/exachat.py +0 -433
  138. webscout/Provider/OPENAI/freeaichat.py +0 -352
  139. webscout/Provider/OPENAI/glider.py +0 -316
  140. webscout/Provider/OPENAI/heckai.py +0 -337
  141. webscout/Provider/OPENAI/llmchatco.py +0 -327
  142. webscout/Provider/OPENAI/netwrck.py +0 -348
  143. webscout/Provider/OPENAI/opkfc.py +0 -488
  144. webscout/Provider/OPENAI/scirachat.py +0 -463
  145. webscout/Provider/OPENAI/sonus.py +0 -294
  146. webscout/Provider/OPENAI/standardinput.py +0 -425
  147. webscout/Provider/OPENAI/textpollinations.py +0 -285
  148. webscout/Provider/OPENAI/toolbaz.py +0 -405
  149. webscout/Provider/OPENAI/typegpt.py +0 -346
  150. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  151. webscout/Provider/OPENAI/utils.py +0 -211
  152. webscout/Provider/OPENAI/venice.py +0 -413
  153. webscout/Provider/OPENAI/wisecat.py +0 -381
  154. webscout/Provider/OPENAI/writecream.py +0 -156
  155. webscout/Provider/OPENAI/x0gpt.py +0 -371
  156. webscout/Provider/OPENAI/yep.py +0 -327
  157. webscout/Provider/OpenGPT.py +0 -199
  158. webscout/Provider/Openai.py +0 -496
  159. webscout/Provider/PI.py +0 -344
  160. webscout/Provider/Perplexitylabs.py +0 -415
  161. webscout/Provider/Phind.py +0 -535
  162. webscout/Provider/PizzaGPT.py +0 -198
  163. webscout/Provider/QwenLM.py +0 -254
  164. webscout/Provider/Reka.py +0 -214
  165. webscout/Provider/StandardInput.py +0 -278
  166. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  167. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  168. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  169. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  170. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  171. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  172. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  173. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  174. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  175. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  176. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  177. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  178. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  179. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  180. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  181. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  182. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  183. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  184. webscout/Provider/TTI/__init__.py +0 -12
  185. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  186. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  187. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  188. webscout/Provider/TTI/artbit/__init__.py +0 -22
  189. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  190. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  191. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  192. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  193. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  194. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  195. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  196. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  197. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  198. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  199. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  200. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  203. webscout/Provider/TTI/talkai/__init__.py +0 -4
  204. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  205. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  206. webscout/Provider/TTS/__init__.py +0 -7
  207. webscout/Provider/TTS/deepgram.py +0 -156
  208. webscout/Provider/TTS/elevenlabs.py +0 -111
  209. webscout/Provider/TTS/gesserit.py +0 -127
  210. webscout/Provider/TTS/murfai.py +0 -113
  211. webscout/Provider/TTS/parler.py +0 -111
  212. webscout/Provider/TTS/speechma.py +0 -180
  213. webscout/Provider/TTS/streamElements.py +0 -333
  214. webscout/Provider/TTS/utils.py +0 -280
  215. webscout/Provider/TeachAnything.py +0 -187
  216. webscout/Provider/TextPollinationsAI.py +0 -231
  217. webscout/Provider/TwoAI.py +0 -199
  218. webscout/Provider/Venice.py +0 -219
  219. webscout/Provider/VercelAI.py +0 -234
  220. webscout/Provider/WebSim.py +0 -228
  221. webscout/Provider/WiseCat.py +0 -196
  222. webscout/Provider/Writecream.py +0 -211
  223. webscout/Provider/WritingMate.py +0 -197
  224. webscout/Provider/Youchat.py +0 -330
  225. webscout/Provider/__init__.py +0 -198
  226. webscout/Provider/ai4chat.py +0 -202
  227. webscout/Provider/aimathgpt.py +0 -189
  228. webscout/Provider/akashgpt.py +0 -342
  229. webscout/Provider/askmyai.py +0 -158
  230. webscout/Provider/asksteve.py +0 -203
  231. webscout/Provider/bagoodex.py +0 -145
  232. webscout/Provider/cerebras.py +0 -242
  233. webscout/Provider/chatglm.py +0 -205
  234. webscout/Provider/cleeai.py +0 -213
  235. webscout/Provider/copilot.py +0 -428
  236. webscout/Provider/elmo.py +0 -234
  237. webscout/Provider/freeaichat.py +0 -271
  238. webscout/Provider/gaurish.py +0 -244
  239. webscout/Provider/geminiapi.py +0 -208
  240. webscout/Provider/geminiprorealtime.py +0 -160
  241. webscout/Provider/granite.py +0 -187
  242. webscout/Provider/hermes.py +0 -219
  243. webscout/Provider/julius.py +0 -223
  244. webscout/Provider/koala.py +0 -268
  245. webscout/Provider/labyrinth.py +0 -340
  246. webscout/Provider/learnfastai.py +0 -266
  247. webscout/Provider/lepton.py +0 -194
  248. webscout/Provider/llama3mitril.py +0 -180
  249. webscout/Provider/llamatutor.py +0 -192
  250. webscout/Provider/llmchat.py +0 -213
  251. webscout/Provider/llmchatco.py +0 -311
  252. webscout/Provider/meta.py +0 -794
  253. webscout/Provider/multichat.py +0 -325
  254. webscout/Provider/promptrefine.py +0 -193
  255. webscout/Provider/scira_chat.py +0 -277
  256. webscout/Provider/scnet.py +0 -187
  257. webscout/Provider/searchchat.py +0 -293
  258. webscout/Provider/sonus.py +0 -208
  259. webscout/Provider/talkai.py +0 -194
  260. webscout/Provider/toolbaz.py +0 -320
  261. webscout/Provider/turboseek.py +0 -219
  262. webscout/Provider/tutorai.py +0 -252
  263. webscout/Provider/typefully.py +0 -280
  264. webscout/Provider/typegpt.py +0 -232
  265. webscout/Provider/uncovr.py +0 -312
  266. webscout/Provider/x0gpt.py +0 -256
  267. webscout/Provider/yep.py +0 -376
  268. webscout/litagent/__init__.py +0 -29
  269. webscout/litagent/agent.py +0 -455
  270. webscout/litagent/constants.py +0 -60
  271. webscout/litprinter/__init__.py +0 -59
  272. webscout/scout/__init__.py +0 -8
  273. webscout/scout/core/__init__.py +0 -7
  274. webscout/scout/core/crawler.py +0 -140
  275. webscout/scout/core/scout.py +0 -568
  276. webscout/scout/core/search_result.py +0 -96
  277. webscout/scout/core/text_analyzer.py +0 -63
  278. webscout/scout/core/text_utils.py +0 -277
  279. webscout/scout/core/web_analyzer.py +0 -52
  280. webscout/scout/core.py +0 -881
  281. webscout/scout/element.py +0 -460
  282. webscout/scout/parsers/__init__.py +0 -69
  283. webscout/scout/parsers/html5lib_parser.py +0 -172
  284. webscout/scout/parsers/html_parser.py +0 -236
  285. webscout/scout/parsers/lxml_parser.py +0 -178
  286. webscout/scout/utils.py +0 -37
  287. webscout/swiftcli/__init__.py +0 -809
  288. webscout/zeroart/__init__.py +0 -55
  289. webscout/zeroart/base.py +0 -60
  290. webscout/zeroart/effects.py +0 -99
  291. webscout/zeroart/fonts.py +0 -816
  292. webscout-8.2.2.dist-info/RECORD +0 -309
  293. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  294. webscout-8.2.2.dist-info/top_level.txt +0 -3
  295. webstoken/__init__.py +0 -30
  296. webstoken/classifier.py +0 -189
  297. webstoken/keywords.py +0 -216
  298. webstoken/language.py +0 -128
  299. webstoken/ner.py +0 -164
  300. webstoken/normalizer.py +0 -35
  301. webstoken/processor.py +0 -77
  302. webstoken/sentiment.py +0 -206
  303. webstoken/stemmer.py +0 -73
  304. webstoken/tagger.py +0 -60
  305. webstoken/tokenizer.py +0 -158
  306. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
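
Given how many modules the file list marks as fully removed (the webscout/Extra tree, webscout/Litlogger, webscout/Local, most of webscout/Provider, and the webstoken package), code that imports them from 8.2.2 will break after upgrading to 8.2.7. A minimal sketch for checking at runtime which modules a project relies on are still importable; the module names below are illustrative picks from the file list, and the check itself is plain standard-library Python:

    import importlib

    # Illustrative names taken from the "Files changed" list above;
    # substitute whatever your project actually imports from webscout.
    candidates = [
        "webscout.Extra.tempmail",
        "webscout.Litlogger",
        "webscout.Provider.AISEARCH",
    ]

    for name in candidates:
        try:
            importlib.import_module(name)
            print(f"{name}: importable")
        except ImportError:
            print(f"{name}: missing from this installation")
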
webscout/Provider/AISEARCH/iask_search.py (deleted)
@@ -1,436 +0,0 @@
- import aiohttp
- import asyncio
- import lxml.html
- import re
- import urllib.parse
- from markdownify import markdownify as md
- from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
-
- from webscout.AIbase import AISearch
- from webscout import exceptions
- from webscout.scout import Scout
-
-
- class Response:
-     """A wrapper class for IAsk API responses.
-
-     This class automatically converts response objects to their text representation
-     when printed or converted to string.
-
-     Attributes:
-         text (str): The text content of the response
-
-     Example:
-         >>> response = Response("Hello, world!")
-         >>> print(response)
-         Hello, world!
-         >>> str(response)
-         'Hello, world!'
-     """
-     def __init__(self, text: str):
-         self.text = text
-
-     def __str__(self):
-         return self.text
-
-     def __repr__(self):
-         return self.text
-
-
- def cache_find(diff: Union[dict, list]) -> Optional[str]:
-     """Find HTML content in a nested dictionary or list structure.
-
-     Args:
-         diff (Union[dict, list]): The nested structure to search
-
-     Returns:
-         Optional[str]: The found HTML content, or None if not found
-     """
-     values = diff if isinstance(diff, list) else diff.values()
-     for value in values:
-         if isinstance(value, (list, dict)):
-             cache = cache_find(value)
-             if cache:
-                 return cache
-         if isinstance(value, str) and re.search(r"<p>.+?</p>", value):
-             return md(value).strip()
-
-     return None
-
-
- ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
- DetailLevelType = Literal["concise", "detailed", "comprehensive"]
-
-
- class IAsk(AISearch):
-     """A class to interact with the IAsk AI search API.
-
-     IAsk provides a powerful search interface that returns AI-generated responses
-     based on web content. It supports both streaming and non-streaming responses,
-     as well as different search modes and detail levels.
-
-     Basic Usage:
-         >>> from webscout import IAsk
-         >>> ai = IAsk()
-         >>> # Non-streaming example
-         >>> response = ai.search("What is Python?")
-         >>> print(response)
-         Python is a high-level programming language...
-
-         >>> # Streaming example
-         >>> for chunk in ai.search("Tell me about AI", stream=True):
-         ...     print(chunk, end="", flush=True)
-         Artificial Intelligence is...
-
-         >>> # With specific mode and detail level
-         >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
-         >>> print(response)
-         Climate change refers to...
-
-     Args:
-         timeout (int, optional): Request timeout in seconds. Defaults to 30.
-         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
-         mode (ModeType, optional): Default search mode. Defaults to "question".
-         detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
-     """
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         proxies: Optional[dict] = None,
-         mode: ModeType = "question",
-         detail_level: Optional[DetailLevelType] = None,
-     ):
-         """Initialize the IAsk API client.
-
-         Args:
-             timeout (int, optional): Request timeout in seconds. Defaults to 30.
-             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
-             mode (ModeType, optional): Default search mode. Defaults to "question".
-             detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
-         """
-         self.timeout = timeout
-         self.proxies = proxies or {}
-         self.default_mode = mode
-         self.default_detail_level = detail_level
-         self.api_endpoint = "https://iask.ai/"
-         self.last_response = {}
-
-     def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
-         """Create a properly formatted URL with mode and detail level parameters.
-
-         Args:
-             query (str): The search query.
-             mode (ModeType, optional): Search mode. Defaults to "question".
-             detail_level (DetailLevelType, optional): Detail level. Defaults to None.
-
-         Returns:
-             str: Formatted URL with query parameters.
-
-         Example:
-             >>> ai = IAsk()
-             >>> url = ai.create_url("Climate change", mode="academic", detail_level="detailed")
-             >>> print(url)
-             https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed
-         """
-         # Create a dictionary of parameters with flattened structure
-         params = {
-             "mode": mode,
-             "q": query
-         }
-
-         # Add detail_level if provided using the flattened format
-         if detail_level:
-             params["options[detail_level]"] = detail_level
-
-         # Encode the parameters and build the URL
-         query_string = urllib.parse.urlencode(params)
-         url = f"{self.api_endpoint}?{query_string}"
-
-         return url
-
-     def format_html(self, html_content: str) -> str:
-         """Format HTML content into a more readable text format.
-
-         Args:
-             html_content (str): The HTML content to format.
-
-         Returns:
-             str: Formatted text.
-         """
-         scout = Scout(html_content, features='html.parser')
-         output_lines = []
-
-         for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
-             if child.name in ["h1", "h2", "h3"]:
-                 output_lines.append(f"\n**{child.get_text().strip()}**\n")
-             elif child.name == "p":
-                 text = child.get_text().strip()
-                 text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
-                 # Remove footnote markers
-                 text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
-                 output_lines.append(text + "\n")
-             elif child.name in ["ol", "ul"]:
-                 for li in child.find_all("li"):
-                     output_lines.append("- " + li.get_text().strip() + "\n")
-             elif child.name == "div" and "footnotes" in child.get("class", []):
-                 output_lines.append("\n**Authoritative Sources**\n")
-                 for li in child.find_all("li"):
-                     link = li.find("a")
-                     if link:
-                         output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")
-
-         return "".join(output_lines)
-
-     def search(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         mode: Optional[ModeType] = None,
-         detail_level: Optional[DetailLevelType] = None,
-     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
-         """Search using the IAsk API and get AI-generated responses.
-
-         This method sends a search query to IAsk and returns the AI-generated response.
-         It supports both streaming and non-streaming modes, as well as raw response format.
-
-         Args:
-             prompt (str): The search query or prompt to send to the API.
-             stream (bool, optional): If True, yields response chunks as they arrive.
-                 If False, returns complete response. Defaults to False.
-             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
-                 If False, returns Response objects that convert to text automatically.
-                 Defaults to False.
-             mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
-             detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).
-
-         Returns:
-             Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
-                 - If stream=False: Returns complete response as Response object
-                 - If stream=True: Yields response chunks as either Dict or Response objects
-
-         Raises:
-             APIConnectionError: If the API request fails
-
-         Examples:
-             Basic search:
-             >>> ai = IAsk()
-             >>> response = ai.search("What is Python?")
-             >>> print(response)
-             Python is a programming language...
-
-             Streaming response:
-             >>> for chunk in ai.search("Tell me about AI", stream=True):
-             ...     print(chunk, end="")
-             Artificial Intelligence...
-
-             Raw response format:
-             >>> for chunk in ai.search("Hello", stream=True, raw=True):
-             ...     print(chunk)
-             {'text': 'Hello'}
-             {'text': ' there!'}
-
-             With specific mode and detail level:
-             >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
-             >>> print(response)
-             Climate change refers to...
-         """
-         # Use provided parameters or fall back to instance defaults
-         search_mode = mode or self.default_mode
-         search_detail_level = detail_level or self.default_detail_level
-
-         # For non-streaming, run the async search and return the complete response
-         if not stream:
-             # Create a new event loop for this request
-             loop = asyncio.new_event_loop()
-             asyncio.set_event_loop(loop)
-             try:
-                 result = loop.run_until_complete(
-                     self._async_search(prompt, False, raw, search_mode, search_detail_level)
-                 )
-                 return result
-             finally:
-                 loop.close()
-
-         # For streaming, use a simpler approach with a single event loop
-         # that stays open until the generator is exhausted
-         buffer = ""
-
-         def sync_generator():
-             nonlocal buffer
-             # Create a new event loop for this generator
-             loop = asyncio.new_event_loop()
-             asyncio.set_event_loop(loop)
-
-             try:
-                 # Get the async generator
-                 async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
-                 async_gen = loop.run_until_complete(async_gen_coro)
-
-                 # Process chunks one by one
-                 while True:
-                     try:
-                         # Get the next chunk
-                         chunk_coro = async_gen.__anext__()
-                         chunk = loop.run_until_complete(chunk_coro)
-
-                         # Update buffer and yield the chunk
-                         if isinstance(chunk, dict) and 'text' in chunk:
-                             buffer += chunk['text']
-                         elif isinstance(chunk, Response):
-                             buffer += chunk.text
-                         else:
-                             buffer += str(chunk)
-
-                         yield chunk
-                     except StopAsyncIteration:
-                         break
-                     except Exception as e:
-                         print(f"Error in generator: {e}")
-                         break
-             finally:
-                 # Store the final response and close the loop
-                 self.last_response = {"text": buffer}
-                 loop.close()
-
-         return sync_generator()
-
-     async def _async_search(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         mode: ModeType = "question",
-         detail_level: Optional[DetailLevelType] = None,
-     ) -> Union[Response, AsyncIterator[Union[Dict[str, str], Response]]]:
-         """Internal async implementation of the search method."""
-
-         async def stream_generator() -> AsyncIterator[str]:
-             async with aiohttp.ClientSession() as session:
-                 # Prepare parameters
-                 params = {"mode": mode, "q": prompt}
-                 if detail_level:
-                     params["options[detail_level]"] = detail_level
-
-                 try:
-                     async with session.get(
-                         self.api_endpoint,
-                         params=params,
-                         proxy=self.proxies.get('http') if self.proxies else None,
-                         timeout=self.timeout
-                     ) as response:
-                         if not response.ok:
-                             raise exceptions.APIConnectionError(
-                                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {await response.text()}"
-                             )
-
-                         etree = lxml.html.fromstring(await response.text())
-                         phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
-                         csrf_token = (
-                             etree.xpath('//*[@name="csrf-token"]').pop().get("content")
-                         )
-
-                         async with session.ws_connect(
-                             f"{self.api_endpoint}live/websocket",
-                             params={
-                                 "_csrf_token": csrf_token,
-                                 "vsn": "2.0.0",
-                             },
-                             proxy=self.proxies.get('http') if self.proxies else None,
-                             timeout=self.timeout
-                         ) as wsResponse:
-                             await wsResponse.send_json(
-                                 [
-                                     None,
-                                     None,
-                                     f"lv:{phx_node.get('id')}",
-                                     "phx_join",
-                                     {
-                                         "params": {"_csrf_token": csrf_token},
-                                         "url": str(response.url),
-                                         "session": phx_node.get("data-phx-session"),
-                                     },
-                                 ]
-                             )
-                             while True:
-                                 json_data = await wsResponse.receive_json()
-                                 if not json_data:
-                                     break
-                                 diff: dict = json_data[4]
-                                 try:
-                                     chunk: str = diff["e"][0][1]["data"]
-                                     # Check if the chunk contains HTML content
-                                     if re.search(r"<[^>]+>", chunk):
-                                         formatted_chunk = self.format_html(chunk)
-                                         yield formatted_chunk
-                                     else:
-                                         yield chunk.replace("<br/>", "\n")
-                                 except:
-                                     cache = cache_find(diff)
-                                     if cache:
-                                         if diff.get("response", None):
-                                             # Format the cache content if it contains HTML
-                                             if re.search(r"<[^>]+>", cache):
-                                                 formatted_cache = self.format_html(cache)
-                                                 yield formatted_cache
-                                             else:
-                                                 yield cache
-                                         break
-                 except Exception as e:
-                     raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")
-
-         # For non-streaming, collect all chunks and return a single response
-         if not stream:
-             buffer = ""
-             async for chunk in stream_generator():
-                 buffer += chunk
-             self.last_response = {"text": buffer}
-             return Response(buffer) if not raw else {"text": buffer}
-
-         # For streaming, create an async generator that yields chunks
-         async def process_stream():
-             buffer = ""
-             async for chunk in stream_generator():
-                 buffer += chunk
-                 if raw:
-                     yield {"text": chunk}
-                 else:
-                     yield Response(chunk)
-             self.last_response = {"text": buffer}
-
-         # Return the async generator
-         return process_stream()
-
-
- if __name__ == "__main__":
-     from rich import print
-
-     ai = IAsk()
-
-     # Example 1: Simple search with default mode
-     print("\n[bold cyan]Example 1: Simple search with default mode[/bold cyan]")
-     response = ai.search("What is Python?", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
-     print("\n\n[bold green]Response complete.[/bold green]\n")
-
-     # Example 2: Search with academic mode
-     print("\n[bold cyan]Example 2: Search with academic mode[/bold cyan]")
-     response = ai.search("Quantum computing applications", mode="academic", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
-     print("\n\n[bold green]Response complete.[/bold green]\n")
-
-     # Example 3: Search with advanced mode and detailed level
-     print("\n[bold cyan]Example 3: Search with advanced mode and detailed level[/bold cyan]")
-     response = ai.search("Climate change solutions", mode="advanced", detail_level="detailed", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
-     print("\n\n[bold green]Response complete.[/bold green]\n")
-
-     # Example 4: Demonstrating the create_url method
-     print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
-     url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
-     print(f"URL: {url}")
-     print("This URL can be used directly in a browser or with other HTTP clients.")
webscout/Provider/AISEARCH/monica_search.py (deleted)
@@ -1,246 +0,0 @@
- import requests
- import json
- import re
- import uuid
- from typing import Dict, Optional, Generator, Union, Any
-
- from webscout.AIbase import AISearch
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
-
- class Response:
-     """A wrapper class for Monica API responses.
-
-     This class automatically converts response objects to their text representation
-     when printed or converted to string.
-
-     Attributes:
-         text (str): The text content of the response
-
-     Example:
-         >>> response = Response("Hello, world!")
-         >>> print(response)
-         Hello, world!
-         >>> str(response)
-         'Hello, world!'
-     """
-     def __init__(self, text: str):
-         self.text = text
-
-     def __str__(self):
-         return self.text
-
-     def __repr__(self):
-         return self.text
-
-
- class Monica(AISearch):
-     """A class to interact with the Monica AI search API.
-
-     Monica provides a powerful search interface that returns AI-generated responses
-     based on web content. It supports both streaming and non-streaming responses.
-
-     Basic Usage:
-         >>> from webscout import Monica
-         >>> ai = Monica()
-         >>> # Non-streaming example
-         >>> response = ai.search("What is Python?")
-         >>> print(response)
-         Python is a high-level programming language...
-
-         >>> # Streaming example
-         >>> for chunk in ai.search("Tell me about AI", stream=True):
-         ...     print(chunk, end="", flush=True)
-         Artificial Intelligence is...
-
-         >>> # Raw response format
-         >>> for chunk in ai.search("Hello", stream=True, raw=True):
-         ...     print(chunk)
-         {'text': 'Hello'}
-         {'text': ' there!'}
-
-     Args:
-         timeout (int, optional): Request timeout in seconds. Defaults to 60.
-         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
-     """
-
-     def __init__(
-         self,
-         timeout: int = 60,
-         proxies: Optional[dict] = None,
-     ):
-         """Initialize the Monica API client.
-
-         Args:
-             timeout (int, optional): Request timeout in seconds. Defaults to 60.
-             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
-         """
-         self.session = requests.Session()
-         self.api_endpoint = "https://monica.so/api/search_v1/search"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.client_id = str(uuid.uuid4())
-         self.session_id = ""
-
-         self.headers = {
-             "accept": "*/*",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "dnt": "1",
-             "origin": "https://monica.so",
-             "referer": "https://monica.so/answers",
-             "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Windows"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": LitAgent().random(),
-             "x-client-id": self.client_id,
-             "x-client-locale": "en",
-             "x-client-type": "web",
-             "x-client-version": "5.4.3",
-             "x-from-channel": "NA",
-             "x-product-name": "Monica-Search",
-             "x-time-zone": "Asia/Calcutta;-330"
-         }
-
-         self.cookies = {
-             "monica_home_theme": "auto",
-         }
-
-         self.session.headers.update(self.headers)
-         self.proxies = proxies
-
-     def search(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
-         """Search using the Monica API and get AI-generated responses.
-
-         This method sends a search query to Monica and returns the AI-generated response.
-         It supports both streaming and non-streaming modes, as well as raw response format.
-
-         Args:
-             prompt (str): The search query or prompt to send to the API.
-             stream (bool, optional): If True, yields response chunks as they arrive.
-                 If False, returns complete response. Defaults to False.
-             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
-                 If False, returns Response objects that convert to text automatically.
-                 Defaults to False.
-
-         Returns:
-             Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
-                 - If stream=False: Returns complete response as Response object
-                 - If stream=True: Yields response chunks as either Dict or Response objects
-
-         Raises:
-             APIConnectionError: If the API request fails
-         """
-         task_id = str(uuid.uuid4())
-         payload = {
-             "pro": False,
-             "query": prompt,
-             "round": 1,
-             "session_id": self.session_id,
-             "language": "auto",
-             "task_id": task_id
-         }
-
-         def for_stream():
-             try:
-                 with self.session.post(
-                     self.api_endpoint,
-                     json=payload,
-                     stream=True,
-                     cookies=self.cookies,
-                     timeout=self.timeout,
-                     proxies=self.proxies
-                 ) as response:
-                     if not response.ok:
-                         raise exceptions.APIConnectionError(
-                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                         )
-
-                     # Process the Server-Sent Events (SSE) stream
-                     for line in response.iter_lines(decode_unicode=True):
-                         if line and line.startswith("data: "):
-                             try:
-                                 data = json.loads(line[6:])  # Remove 'data: ' prefix
-
-                                 # Save session_id for future requests if present
-                                 if "session_id" in data and data["session_id"]:
-                                     self.session_id = data["session_id"]
-
-                                 # Only process chunks with text content
-                                 if "text" in data and data["text"]:
-                                     text_chunk = data["text"]
-
-                                     if raw:
-                                         yield {"text": text_chunk}
-                                     else:
-                                         yield Response(text_chunk)
-
-                                 # Check if stream is finished
-                                 if "finished" in data and data["finished"]:
-                                     break
-
-                             except json.JSONDecodeError:
-                                 continue
-
-             except requests.exceptions.RequestException as e:
-                 raise exceptions.APIConnectionError(f"Request failed: {e}")
-
-         def for_non_stream():
-             full_response = ""
-             search_results = []
-
-             for chunk in for_stream():
-                 if raw:
-                     yield chunk
-                 else:
-                     full_response += str(chunk)
-
-             if not raw:
-                 # Process the full response to clean up formatting
-                 formatted_response = self.format_response(full_response)
-                 self.last_response = Response(formatted_response)
-                 return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     @staticmethod
-     def format_response(text: str) -> str:
-         """Format the response text for better readability.
-
-         Args:
-             text (str): The raw response text
-
-         Returns:
-             str: Formatted text
-         """
-         # Clean up markdown formatting
-         cleaned_text = text.replace('**', '')
-
-         # Remove any empty lines
-         cleaned_text = re.sub(r'\n\s*\n', '\n\n', cleaned_text)
-
-         # Remove any trailing whitespace
-         cleaned_text = cleaned_text.strip()
-
-         return cleaned_text
-
-
- if __name__ == "__main__":
-     from rich import print
-
-     ai = Monica()
-     response = ai.search(input(">>> "), stream=True, raw=False)
-     for chunk in response:
-         print(chunk, end="", flush=True)
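
The removed monica_search.py follows the same pattern: a Monica class that streams Server-Sent Events from monica.so. A minimal usage sketch based on its docstring, again assuming a webscout 8.2.2 installation:

    from webscout import Monica  # present in 8.2.2, removed by 8.2.7

    ai = Monica(timeout=60)

    # Non-streaming: returns a cleaned-up Response object.
    print(ai.search("What is Python?"))

    # Streaming with raw dictionaries, as shown in the class docstring.
    for chunk in ai.search("Tell me about AI", stream=True, raw=True):
        print(chunk["text"], end="", flush=True)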