webscout 7.9__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,436 @@
1
+ import aiohttp
2
+ import asyncio
3
+ import lxml.html
4
+ import re
5
+ import urllib.parse
6
+ from markdownify import markdownify as md
7
+ from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
8
+
9
+ from webscout.AIbase import AISearch
10
+ from webscout import exceptions
11
+ from webscout.scout import Scout
12
+
13
+
14
class Response:
    """Lightweight wrapper around an IAsk API text payload.

    Instances render as their plain text content when printed or
    converted with ``str``/``repr``.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> resp = Response("Hello, world!")
        >>> print(resp)
        Hello, world!
        >>> str(resp)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
38
+
39
+
40
def cache_find(diff: Union[dict, list]) -> Optional[str]:
    """Recursively search a nested dict/list for HTML paragraph content.

    Args:
        diff (Union[dict, list]): The nested structure to search.

    Returns:
        Optional[str]: Markdown-converted text of the first string value
        containing a ``<p>...</p>`` fragment, or None when nothing matches.
    """
    items = diff.values() if isinstance(diff, dict) else diff
    for item in items:
        # Descend into nested containers first; return the first hit found.
        if isinstance(item, (list, dict)):
            found = cache_find(item)
            if found:
                return found
        if isinstance(item, str) and re.search(r"<p>.+?</p>", item):
            return md(item).strip()
    return None
59
+
60
+
61
# Search modes accepted by the IAsk API.
ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
# Verbosity levels for generated answers.
DetailLevelType = Literal["concise", "detailed", "comprehensive"]
63
+
64
+
65
class IAsk(AISearch):
    """A class to interact with the IAsk AI search API.

    IAsk provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses,
    as well as different search modes and detail levels.

    Basic Usage:
        >>> from webscout import IAsk
        >>> ai = IAsk()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # With specific mode and detail level
        >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
        >>> print(response)
        Climate change refers to...

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 30.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        mode (ModeType, optional): Default search mode. Defaults to "question".
        detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[dict] = None,
        mode: ModeType = "question",
        detail_level: Optional[DetailLevelType] = None,
    ):
        """Initialize the IAsk API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 30.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            mode (ModeType, optional): Default search mode. Defaults to "question".
            detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
        """
        self.timeout = timeout
        self.proxies = proxies or {}
        self.default_mode = mode
        self.default_detail_level = detail_level
        self.api_endpoint = "https://iask.ai/"
        self.last_response = {}

    def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
        """Create a properly formatted URL with mode and detail level parameters.

        Args:
            query (str): The search query.
            mode (ModeType, optional): Search mode. Defaults to "question".
            detail_level (DetailLevelType, optional): Detail level. Defaults to None.

        Returns:
            str: Formatted URL with query parameters.

        Example:
            >>> ai = IAsk()
            >>> url = ai.create_url("Climate change", mode="academic", detail_level="detailed")
            >>> print(url)
            https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed
        """
        params = {"mode": mode, "q": query}
        # The API expects the detail level in the flattened "options[...]" form.
        if detail_level:
            params["options[detail_level]"] = detail_level
        query_string = urllib.parse.urlencode(params)
        return f"{self.api_endpoint}?{query_string}"

    def format_html(self, html_content: str) -> str:
        """Format HTML content into a more readable text format.

        Args:
            html_content (str): The HTML content to format.

        Returns:
            str: Formatted text.
        """
        scout = Scout(html_content, features='html.parser')
        output_lines = []

        for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
            if child.name in ["h1", "h2", "h3"]:
                output_lines.append(f"\n**{child.get_text().strip()}**\n")
            elif child.name == "p":
                text = child.get_text().strip()
                # Strip the boilerplate attribution line the site prepends.
                text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
                # Remove footnote markers
                text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
                output_lines.append(text + "\n")
            elif child.name in ["ol", "ul"]:
                for li in child.find_all("li"):
                    output_lines.append("- " + li.get_text().strip() + "\n")
            elif child.name == "div" and "footnotes" in child.get("class", []):
                output_lines.append("\n**Authoritative Sources**\n")
                for li in child.find_all("li"):
                    link = li.find("a")
                    if link:
                        output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")

        return "".join(output_lines)

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        mode: Optional[ModeType] = None,
        detail_level: Optional[DetailLevelType] = None,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the IAsk API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.
            mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
            detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).

        Returns:
            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
                - If stream=False: Returns complete response as Response object
                - If stream=True: Yields response chunks as either Dict or Response objects

        Raises:
            APIConnectionError: If the API request fails
        """
        # Use provided parameters or fall back to instance defaults
        search_mode = mode or self.default_mode
        search_detail_level = detail_level or self.default_detail_level

        # For non-streaming, run the async search and return the complete response
        if not stream:
            # A fresh event loop per request keeps this usable from sync code.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                result = loop.run_until_complete(
                    self._async_search(prompt, False, raw, search_mode, search_detail_level)
                )
                return result
            finally:
                loop.close()

        # For streaming, keep a single event loop open until the generator
        # is exhausted, pumping the async generator one chunk at a time.
        buffer = ""

        def sync_generator():
            nonlocal buffer
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Obtain the async generator from the coroutine.
                async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
                async_gen = loop.run_until_complete(async_gen_coro)

                while True:
                    try:
                        chunk = loop.run_until_complete(async_gen.__anext__())

                        # Track the accumulated text for last_response.
                        if isinstance(chunk, dict) and 'text' in chunk:
                            buffer += chunk['text']
                        elif isinstance(chunk, Response):
                            buffer += chunk.text
                        else:
                            buffer += str(chunk)

                        yield chunk
                    except StopAsyncIteration:
                        break
                    except Exception as e:
                        # Best-effort streaming: report and stop rather than
                        # propagate mid-stream.
                        print(f"Error in generator: {e}")
                        break
            finally:
                # Store the final response and close the loop
                self.last_response = {"text": buffer}
                loop.close()

        return sync_generator()

    async def _async_search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        mode: ModeType = "question",
        detail_level: Optional[DetailLevelType] = None,
    ) -> Union[Response, AsyncIterator[Union[Dict[str, str], Response]]]:
        """Internal async implementation of the search method."""

        async def stream_generator() -> AsyncIterator[str]:
            async with aiohttp.ClientSession() as session:
                # Prepare parameters
                params = {"mode": mode, "q": prompt}
                if detail_level:
                    params["options[detail_level]"] = detail_level

                try:
                    async with session.get(
                        self.api_endpoint,
                        params=params,
                        proxy=self.proxies.get('http') if self.proxies else None,
                        # aiohttp expects a ClientTimeout, not a bare number.
                        timeout=aiohttp.ClientTimeout(total=self.timeout)
                    ) as response:
                        if not response.ok:
                            # NOTE: aiohttp exposes `.status`, not `.status_code`
                            # (the original attribute raised AttributeError here).
                            raise exceptions.APIConnectionError(
                                f"Failed to generate response - ({response.status}, {response.reason}) - {await response.text()}"
                            )

                        etree = lxml.html.fromstring(await response.text())
                        # The Phoenix LiveView node id and CSRF token are needed
                        # to join the websocket channel below.
                        phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
                        csrf_token = (
                            etree.xpath('//*[@name="csrf-token"]').pop().get("content")
                        )

                        async with session.ws_connect(
                            f"{self.api_endpoint}live/websocket",
                            params={
                                "_csrf_token": csrf_token,
                                "vsn": "2.0.0",
                            },
                            proxy=self.proxies.get('http') if self.proxies else None,
                            timeout=self.timeout
                        ) as wsResponse:
                            await wsResponse.send_json(
                                [
                                    None,
                                    None,
                                    f"lv:{phx_node.get('id')}",
                                    "phx_join",
                                    {
                                        "params": {"_csrf_token": csrf_token},
                                        "url": str(response.url),
                                        "session": phx_node.get("data-phx-session"),
                                    },
                                ]
                            )
                            while True:
                                json_data = await wsResponse.receive_json()
                                if not json_data:
                                    break
                                diff: dict = json_data[4]
                                try:
                                    chunk: str = diff["e"][0][1]["data"]
                                    # Check if the chunk contains HTML content
                                    if re.search(r"<[^>]+>", chunk):
                                        yield self.format_html(chunk)
                                    else:
                                        yield chunk.replace("<br/>", "\n")
                                except (KeyError, IndexError, TypeError):
                                    # The diff did not carry an "e" event payload;
                                    # fall back to scanning it for cached HTML.
                                    cache = cache_find(diff)
                                    if cache:
                                        if diff.get("response", None):
                                            if re.search(r"<[^>]+>", cache):
                                                yield self.format_html(cache)
                                            else:
                                                yield cache
                                            break
                except Exception as e:
                    raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")

        # For non-streaming, collect all chunks and return a single response
        if not stream:
            buffer = ""
            async for chunk in stream_generator():
                buffer += chunk
            self.last_response = {"text": buffer}
            return Response(buffer) if not raw else {"text": buffer}

        # For streaming, create an async generator that yields chunks
        async def process_stream():
            buffer = ""
            async for chunk in stream_generator():
                buffer += chunk
                if raw:
                    yield {"text": chunk}
                else:
                    yield Response(chunk)
            self.last_response = {"text": buffer}

        # Return the async generator
        return process_stream()
404
+
405
+
406
if __name__ == "__main__":
    from rich import print

    ai = IAsk()

    def _demo(title: str, query: str, **opts) -> None:
        # Stream one query and echo the chunks as they arrive.
        print(f"\n[bold cyan]{title}[/bold cyan]")
        for piece in ai.search(query, stream=True, **opts):
            print(piece, end="", flush=True)
        print("\n\n[bold green]Response complete.[/bold green]\n")

    _demo("Example 1: Simple search with default mode", "What is Python?")
    _demo("Example 2: Search with academic mode", "Quantum computing applications", mode="academic")
    _demo(
        "Example 3: Search with advanced mode and detailed level",
        "Climate change solutions",
        mode="advanced",
        detail_level="detailed",
    )

    # Example 4: Demonstrating the create_url method
    print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
    url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
    print(f"URL: {url}")
    print("This URL can be used directly in a browser or with other HTTP clients.")
@@ -0,0 +1,246 @@
1
+ import requests
2
+ import json
3
+ import re
4
+ import uuid
5
+ from typing import Dict, Optional, Generator, Union, Any
6
+
7
+ from webscout.AIbase import AISearch
8
+ from webscout import exceptions
9
+ from webscout.litagent import LitAgent
10
+
11
+
12
class Response:
    """Lightweight wrapper around a Monica API text payload.

    Instances render as their plain text content when printed or
    converted with ``str``/``repr``.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> resp = Response("Hello, world!")
        >>> print(resp)
        Hello, world!
        >>> str(resp)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
36
+
37
+
38
class Monica(AISearch):
    """A class to interact with the Monica AI search API.

    Monica provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses.

    Basic Usage:
        >>> from webscout import Monica
        >>> ai = Monica()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 60.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 60,
        proxies: Optional[dict] = None,
    ):
        """Initialize the Monica API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 60.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        """
        self.session = requests.Session()
        self.api_endpoint = "https://monica.so/api/search_v1/search"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        # A random client id identifies this browser-like session to the API.
        self.client_id = str(uuid.uuid4())
        # Populated from the first response so follow-up queries share a session.
        self.session_id = ""

        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://monica.so",
            "referer": "https://monica.so/answers",
            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
            "x-client-id": self.client_id,
            "x-client-locale": "en",
            "x-client-type": "web",
            "x-client-version": "5.4.3",
            "x-from-channel": "NA",
            "x-product-name": "Monica-Search",
            "x-time-zone": "Asia/Calcutta;-330"
        }

        self.cookies = {
            "monica_home_theme": "auto",
        }

        self.session.headers.update(self.headers)
        self.proxies = proxies

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the Monica API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.

        Returns:
            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
                - If stream=False: Returns complete response (Response object, or a
                  dict with a 'text' key when raw=True)
                - If stream=True: Yields response chunks as either Dict or Response objects

        Raises:
            APIConnectionError: If the API request fails
        """
        task_id = str(uuid.uuid4())
        payload = {
            "pro": False,
            "query": prompt,
            "round": 1,
            "session_id": self.session_id,
            "language": "auto",
            "task_id": task_id
        }

        def for_stream():
            try:
                with self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    cookies=self.cookies,
                    timeout=self.timeout,
                    proxies=self.proxies
                ) as response:
                    if not response.ok:
                        raise exceptions.APIConnectionError(
                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                        )

                    # Process the Server-Sent Events (SSE) stream
                    for line in response.iter_lines(decode_unicode=True):
                        if line and line.startswith("data: "):
                            try:
                                data = json.loads(line[6:])  # Remove 'data: ' prefix

                                # Save session_id for future requests if present
                                if "session_id" in data and data["session_id"]:
                                    self.session_id = data["session_id"]

                                # Only process chunks with text content
                                if "text" in data and data["text"]:
                                    text_chunk = data["text"]

                                    if raw:
                                        yield {"text": text_chunk}
                                    else:
                                        yield Response(text_chunk)

                                # Check if stream is finished
                                if "finished" in data and data["finished"]:
                                    break

                            except json.JSONDecodeError:
                                # Skip malformed SSE payloads rather than abort.
                                continue

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}")

        def for_non_stream():
            # BUG FIX: the original body contained `yield`, which silently
            # turned this function into a generator — search(stream=False)
            # then returned an unexecuted generator and the `return` value
            # was lost. Accumulate instead and always return a result.
            full_response = ""
            for chunk in for_stream():
                full_response += chunk["text"] if raw else str(chunk)

            if raw:
                self.last_response = {"text": full_response}
                return self.last_response

            # Process the full response to clean up formatting
            formatted_response = self.format_response(full_response)
            self.last_response = Response(formatted_response)
            return self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_response(text: str) -> str:
        """Format the response text for better readability.

        Args:
            text (str): The raw response text

        Returns:
            str: Formatted text
        """
        # Clean up markdown formatting
        cleaned_text = text.replace('**', '')

        # Collapse runs of blank lines to a single blank line
        cleaned_text = re.sub(r'\n\s*\n', '\n\n', cleaned_text)

        # Remove any leading/trailing whitespace
        cleaned_text = cleaned_text.strip()

        return cleaned_text
238
+
239
+
240
if __name__ == "__main__":
    from rich import print

    ai = Monica()
    # Stream the answer for a single interactive query.
    for chunk in ai.search(input(">>> "), stream=True, raw=False):
        print(chunk, end="", flush=True)