webscout 1.2.8.tar.gz → 1.2.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (40):
  1. {webscout-1.2.8 → webscout-1.2.9}/PKG-INFO +9 -11
  2. {webscout-1.2.8 → webscout-1.2.9}/README.md +8 -10
  3. {webscout-1.2.8 → webscout-1.2.9}/setup.py +1 -1
  4. {webscout-1.2.8 → webscout-1.2.9}/webscout/__init__.py +0 -1
  5. {webscout-1.2.8 → webscout-1.2.9}/webscout/transcriber.py +496 -496
  6. webscout-1.2.9/webscout/version.py +2 -0
  7. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/PKG-INFO +9 -11
  8. webscout-1.2.8/webscout/version.py +0 -2
  9. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/__init__.py +0 -0
  10. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/documents/__init__.py +0 -0
  11. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/documents/query_results_extractor.py +0 -0
  12. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  13. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/networks/__init__.py +0 -0
  14. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/networks/filepath_converter.py +0 -0
  15. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/networks/google_searcher.py +0 -0
  16. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/networks/network_configs.py +0 -0
  17. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  18. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/utilsdw/__init__.py +0 -0
  19. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/utilsdw/enver.py +0 -0
  20. {webscout-1.2.8 → webscout-1.2.9}/DeepWEBS/utilsdw/logger.py +0 -0
  21. {webscout-1.2.8 → webscout-1.2.9}/LICENSE.md +0 -0
  22. {webscout-1.2.8 → webscout-1.2.9}/setup.cfg +0 -0
  23. {webscout-1.2.8 → webscout-1.2.9}/webscout/AI.py +0 -0
  24. {webscout-1.2.8 → webscout-1.2.9}/webscout/AIbase.py +0 -0
  25. {webscout-1.2.8 → webscout-1.2.9}/webscout/AIutel.py +0 -0
  26. {webscout-1.2.8 → webscout-1.2.9}/webscout/DWEBS.py +0 -0
  27. {webscout-1.2.8 → webscout-1.2.9}/webscout/HelpingAI.py +0 -0
  28. {webscout-1.2.8 → webscout-1.2.9}/webscout/LLM.py +0 -0
  29. {webscout-1.2.8 → webscout-1.2.9}/webscout/__main__.py +0 -0
  30. {webscout-1.2.8 → webscout-1.2.9}/webscout/cli.py +0 -0
  31. {webscout-1.2.8 → webscout-1.2.9}/webscout/exceptions.py +0 -0
  32. {webscout-1.2.8 → webscout-1.2.9}/webscout/models.py +0 -0
  33. {webscout-1.2.8 → webscout-1.2.9}/webscout/utils.py +0 -0
  34. {webscout-1.2.8 → webscout-1.2.9}/webscout/webscout_search.py +0 -0
  35. {webscout-1.2.8 → webscout-1.2.9}/webscout/webscout_search_async.py +0 -0
  36. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/SOURCES.txt +0 -0
  37. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/dependency_links.txt +0 -0
  38. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/entry_points.txt +0 -0
  39. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/requires.txt +0 -0
  40. {webscout-1.2.8 → webscout-1.2.9}/webscout.egg-info/top_level.txt +0 -0
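
Only the first eight entries carry textual changes; the rest are unmodified. The contents of the new webscout/version.py are not reproduced below, but given its +2 -0 count, the version bump in setup.py, and the `from .version import __version__` re-export in webscout/__init__.py, it plausibly amounts to the following sketch (an assumption, not part of the recorded diff):

```python
# webscout/version.py in 1.2.9 — assumed contents, not shown in this diff
__version__ = "1.2.9"
```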
{webscout-1.2.8 → webscout-1.2.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.2.8
+Version: 1.2.9
 Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -357,11 +357,12 @@ import logging
 import sys
 from itertools import chain
 from random import shuffle
-
 import requests
 from webscout import AsyncWEBS
 
-# bypass curl-cffi NotImplementedError in windows https://curl-cffi.readthedocs.io/en/latest/faq/
+# If you have proxies, define them here
+proxies = None
+
 if sys.platform.lower().startswith("win"):
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
@@ -373,24 +374,21 @@ def get_words():
 
 async def aget_results(word):
     async with AsyncWEBS(proxies=proxies) as WEBS:
-        results = [r async for r in WEBS.text(word, max_results=None)]
+        results = await WEBS.text(word, max_results=None)
     return results
 
 async def main():
     words = get_words()
     shuffle(words)
-    tasks = []
-    for word in words[:10]:
-        tasks.append(aget_results(word))
+    tasks = [aget_results(word) for word in words[:10]]
     results = await asyncio.gather(*tasks)
     print(f"Done")
     for r in chain.from_iterable(results):
         print(r)
-
 
-if __name__ == "__main__":
-    logging.basicConfig(level=logging.DEBUG)
-    asyncio.run(main())
+logging.basicConfig(level=logging.DEBUG)
+
+await main()
 ```
 It is important to note that the WEBS and AsyncWEBS classes should always be used as a context manager (with statement).
 This ensures proper resource management and cleanup, as the context manager will automatically handle opening and closing the HTTP client connection.
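
Two API-visible changes in the hunks above: `AsyncWEBS.text()` goes from an async generator (consumed with `async for`) to a coroutine returning the full result list, and the `if __name__ == "__main__": asyncio.run(main())` entry point becomes a bare `await main()`, which only works where an event loop is already running (e.g. a notebook). A minimal sketch for running the updated example from a plain script, assuming `main()` as defined above:

```python
import asyncio

# `await main()` at module level assumes a live event loop (Jupyter/IPython).
# In a plain .py script, drive the coroutine explicitly instead;
# `main` here is the coroutine from the README example above.
if __name__ == "__main__":
    asyncio.run(main())
```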
{webscout-1.2.8 → webscout-1.2.9}/README.md

@@ -305,11 +305,12 @@ import logging
 import sys
 from itertools import chain
 from random import shuffle
-
 import requests
 from webscout import AsyncWEBS
 
-# bypass curl-cffi NotImplementedError in windows https://curl-cffi.readthedocs.io/en/latest/faq/
+# If you have proxies, define them here
+proxies = None
+
 if sys.platform.lower().startswith("win"):
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
@@ -321,24 +322,21 @@ def get_words():
 
 async def aget_results(word):
     async with AsyncWEBS(proxies=proxies) as WEBS:
-        results = [r async for r in WEBS.text(word, max_results=None)]
+        results = await WEBS.text(word, max_results=None)
     return results
 
 async def main():
     words = get_words()
     shuffle(words)
-    tasks = []
-    for word in words[:10]:
-        tasks.append(aget_results(word))
+    tasks = [aget_results(word) for word in words[:10]]
     results = await asyncio.gather(*tasks)
     print(f"Done")
     for r in chain.from_iterable(results):
         print(r)
-
 
-if __name__ == "__main__":
-    logging.basicConfig(level=logging.DEBUG)
-    asyncio.run(main())
+logging.basicConfig(level=logging.DEBUG)
+
+await main()
 ```
 It is important to note that the WEBS and AsyncWEBS classes should always be used as a context manager (with statement).
 This ensures proper resource management and cleanup, as the context manager will automatically handle opening and closing the HTTP client connection.
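
The note above (repeated verbatim in PKG-INFO and README.md) stresses context-manager usage. A minimal synchronous sketch of that pattern; the query string and `max_results` value are illustrative, and the call mirrors the `text()` usage in the async example above:

```python
from webscout import WEBS

# The context manager opens and closes the underlying HTTP client,
# which is the cleanup the note above refers to.
with WEBS() as webs:
    results = webs.text("duckduckgo api", max_results=5)
    for r in results:
        print(r)
```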
{webscout-1.2.8 → webscout-1.2.9}/setup.py

@@ -9,7 +9,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="1.2.8",
+    version="1.2.9",
     description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos",
     long_description=README,
     long_description_content_type="text/markdown",
{webscout-1.2.8 → webscout-1.2.9}/webscout/__init__.py

@@ -9,7 +9,6 @@ from .webscout_search import WEBS
 from .webscout_search_async import AsyncWEBS
 from .version import __version__
 from .DWEBS import DeepWEBS
-from .AIutel import appdir
 from .transcriber import transcriber
 
 
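
The only semantic change in this hunk is that `appdir` is no longer re-exported from the package root. A hedged sketch of the 1.2.9 top-level import surface, based solely on the imports visible above:

```python
# Names still re-exported by webscout/__init__.py in 1.2.9:
from webscout import WEBS, AsyncWEBS, DeepWEBS, transcriber, __version__

# Removed in 1.2.9 — this now fails at the package root:
# from webscout import appdir
# Since webscout/AIutel.py itself is unchanged in this diff (+0 -0),
# appdir presumably remains importable from its own module (assumption):
# from webscout.AIutel import appdir
```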