ai-parrot 0.3.3.tar.gz → 0.3.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ai-parrot has been flagged as potentially problematic.

Files changed (159)
  1. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.github/workflows/release.yml +1 -1
  2. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.gitignore +3 -0
  3. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/Makefile +1 -1
  4. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/PKG-INFO +9 -27
  5. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/PKG-INFO +9 -27
  6. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/SOURCES.txt +7 -0
  7. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/requires.txt +7 -26
  8. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/app.py +2 -2
  9. ai_parrot-0.3.5/documents/AR_Certification_Skill_Practice_Scorecard_EXAMPLE.pdf +0 -0
  10. ai_parrot-0.3.5/documents/Day 1_Essentials_AR_PPT.pdf +0 -0
  11. ai_parrot-0.3.5/documents/video_2024-09-11_19-43-58.mp3 +0 -0
  12. ai_parrot-0.3.5/documents/video_2024-09-11_19-43-58.mp4 +0 -0
  13. ai_parrot-0.3.5/documents/video_2024-09-11_19-43-58.vtt +122 -0
  14. ai_parrot-0.3.5/examples/analyze_video.py +33 -0
  15. ai_parrot-0.3.5/examples/extract_frames.py +74 -0
  16. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/load_pdf.py +3 -2
  17. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/test_bot.py +1 -1
  18. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdf.py +1 -8
  19. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/videolocal.py +3 -2
  20. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/version.py +1 -1
  21. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/pyproject.toml +1 -1
  22. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/setup.py +9 -33
  23. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.flake8 +0 -0
  24. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.github/dependabot.yml +0 -0
  25. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.github/workflows/codeql-analysis.yml +0 -0
  26. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.isort.cfg +0 -0
  27. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/.pylintrc +0 -0
  28. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/INSTALL +0 -0
  29. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/LICENSE +0 -0
  30. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/README.md +0 -0
  31. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/SECURITY.md +0 -0
  32. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/dependency_links.txt +0 -0
  33. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/top_level.txt +0 -0
  34. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/documents/ex-code-loaders.txt +0 -0
  35. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/navigator-ssl.ini +0 -0
  36. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/navigator.ini +0 -0
  37. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/domain.ext +0 -0
  38. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/navigator.local.crt +0 -0
  39. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/navigator.local.csr +0 -0
  40. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/navigator.local.key +0 -0
  41. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/rootCA.crt +0 -0
  42. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/rootCA.key +0 -0
  43. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/etc/ssl/rootCA.srl +0 -0
  44. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/check_bot.py +0 -0
  45. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/test_question.py +0 -0
  46. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/mypy.ini +0 -0
  47. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/__init__.py +0 -0
  48. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/__init__.py +0 -0
  49. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/abstract.py +0 -0
  50. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/asktroc.py +0 -0
  51. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/base.py +0 -0
  52. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/basic.py +0 -0
  53. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/bose.py +0 -0
  54. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/cody.py +0 -0
  55. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/copilot.py +0 -0
  56. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/dataframe.py +0 -0
  57. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/hragents.py +0 -0
  58. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/odoo.py +0 -0
  59. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/retrievals/__init__.py +0 -0
  60. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/chatbots/retrievals/constitutional.py +0 -0
  61. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/conf.py +0 -0
  62. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/__init__.py +0 -0
  63. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/__init__.py +0 -0
  64. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/bing.py +0 -0
  65. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/config.py +0 -0
  66. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/duckgo.py +0 -0
  67. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/file.py +0 -0
  68. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/google.py +0 -0
  69. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/gtrends.py +0 -0
  70. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/md2pdf.py +0 -0
  71. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/rag.py +0 -0
  72. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/search.py +0 -0
  73. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/crew/tools/url.py +0 -0
  74. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/exceptions.c +0 -0
  75. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/exceptions.pyx +0 -0
  76. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/handlers/__init__.py +0 -0
  77. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/handlers/bots.py +0 -0
  78. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/handlers/chat.py +0 -0
  79. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/interfaces/__init__.py +0 -0
  80. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/interfaces/database.py +0 -0
  81. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/__init__.py +0 -0
  82. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/abstract.py +0 -0
  83. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/anthropic.py +0 -0
  84. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/google.py +0 -0
  85. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/groq.py +0 -0
  86. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/hf.py +0 -0
  87. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/openai.py +0 -0
  88. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/pipes.py +0 -0
  89. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/llms/vertex.py +0 -0
  90. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/__init__.py +0 -0
  91. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/abstract.py +0 -0
  92. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/audio.py +0 -0
  93. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/basepdf.py +0 -0
  94. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/basevideo.py +0 -0
  95. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/csv.py +0 -0
  96. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/dir.py +0 -0
  97. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/excel.py +0 -0
  98. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/github.py +0 -0
  99. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/handlers/__init__.py +0 -0
  100. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/handlers/data.py +0 -0
  101. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/image.py +0 -0
  102. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/json.py +0 -0
  103. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdfchapters.py +0 -0
  104. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdffn.py +0 -0
  105. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdfimages.py +0 -0
  106. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdfmark.py +0 -0
  107. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdftables.py +0 -0
  108. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/ppt.py +0 -0
  109. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/qa.py +0 -0
  110. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/repo.py +0 -0
  111. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/rtd.py +0 -0
  112. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/txt.py +0 -0
  113. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/utils/__init__.py +0 -0
  114. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/utils/models.py +0 -0
  115. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/video.py +0 -0
  116. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/vimeo.py +0 -0
  117. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/web.py +0 -0
  118. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/web_base.py +0 -0
  119. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/word.py +0 -0
  120. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/youtube.py +0 -0
  121. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/manager.py +0 -0
  122. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/models.py +0 -0
  123. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/py.typed +0 -0
  124. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/stores/__init__.py +0 -0
  125. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/stores/abstract.py +0 -0
  126. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/stores/milvus.py +0 -0
  127. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/stores/qdrant.py +0 -0
  128. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/__init__.py +0 -0
  129. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/abstract.py +0 -0
  130. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/asknews.py +0 -0
  131. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/bing.py +0 -0
  132. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/duck.py +0 -0
  133. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/google.py +0 -0
  134. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/stack.py +0 -0
  135. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/weather.py +0 -0
  136. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/wikipedia.py +0 -0
  137. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/tools/zipcode.py +0 -0
  138. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/__init__.py +0 -0
  139. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/parsers/__init__.py +0 -0
  140. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/parsers/toml.c +0 -0
  141. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/parsers/toml.pyx +0 -0
  142. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/toml.py +0 -0
  143. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/types.cpp +0 -0
  144. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/types.pyx +0 -0
  145. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/utils/uv.py +0 -0
  146. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/pytest.ini +0 -0
  147. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/requirements/requirements-dev.txt +0 -0
  148. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/resources/__init__.py +0 -0
  149. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/resources/quick.py +0 -0
  150. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/resources/users/__init__.py +0 -0
  151. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/resources/users/handlers.py +0 -0
  152. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/resources/users/models.py +0 -0
  153. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/run.py +0 -0
  154. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/settings/__init__.py +0 -0
  155. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/settings/settings.py +0 -0
  156. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/setup.cfg +0 -0
  157. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/templates/.compiled +0 -0
  158. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/templates/README.md +0 -0
  159. {ai_parrot-0.3.3 → ai_parrot-0.3.5}/tox.ini +0 -0
{ai_parrot-0.3.3 → ai_parrot-0.3.5}/.github/workflows/release.yml
@@ -26,7 +26,7 @@ jobs:
 
       - name: Build wheels on Ubuntu
         if: matrix.os == 'ubuntu-latest'
-        uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_x86_64
+        uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64
         with:
           python-versions: 'cp39-cp39 cp310-cp310 cp311-cp311 cp312-cp312'
           build-requirements: 'cython numpy'

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/.gitignore
@@ -175,3 +175,6 @@ docs/videos/*.mkv
 docs/videos/*.mp3
 docs/vimeo/*
 docs/youtube/*
+
+# more documents:
+documents/*

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/Makefile
@@ -14,7 +14,7 @@ install:
 	# QS requirements
 	pip install --upgrade querysource[analytics]
 	# and Parrot:
-	# pip install -e .[google,milvus,analytics]
+	# pip install -e .[google,milvus,groq,analytics]
 
 develop:
 	# Install Parrot

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-parrot
-Version: 0.3.3
+Version: 0.3.5
 Summary: Live Chatbots based on Langchain chatbots and Agents Integrated into Navigator Framework or used into aiohttp applications.
 Home-page: https://github.com/phenobarbital/ai-parrot
 Author: Jesus Lara
@@ -26,7 +26,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Framework :: AsyncIO
-Requires-Python: >=3.10.12
+Requires-Python: >=3.9.20
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: Cython==3.0.11
@@ -58,11 +58,8 @@ Requires-Dist: sentence-transformers==3.0.1
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: tiktoken==0.7.0
 Requires-Dist: tokenizers==0.19.1
-Requires-Dist: unstructured==0.14.3
-Requires-Dist: unstructured-client==0.18.0
-Requires-Dist: youtube-transcript-api==0.6.2
-Requires-Dist: selenium==4.18.1
-Requires-Dist: webdriver_manager==4.0.1
+Requires-Dist: selenium>=4.18.1
+Requires-Dist: webdriver_manager>=4.0.1
 Requires-Dist: transitions==0.9.0
 Requires-Dist: sentencepiece==0.2.0
 Requires-Dist: duckduckgo-search==5.3.0
@@ -79,9 +76,12 @@ Requires-Dist: mediawikiapi==1.2
 Requires-Dist: pyowm==3.3.0
 Requires-Dist: O365==2.0.35
 Requires-Dist: stackapi==0.3.1
-Requires-Dist: timm==1.0.9
 Requires-Dist: torchvision==0.19.1
+Requires-Dist: tf-keras==2.17.0
 Provides-Extra: loaders
+Requires-Dist: unstructured==0.14.3; extra == "loaders"
+Requires-Dist: unstructured-client==0.18.0; extra == "loaders"
+Requires-Dist: youtube-transcript-api==0.6.2; extra == "loaders"
 Requires-Dist: pymupdf==1.24.4; extra == "loaders"
 Requires-Dist: pymupdf4llm==0.0.1; extra == "loaders"
 Requires-Dist: pdf4llm==0.0.6; extra == "loaders"
@@ -107,7 +107,7 @@ Requires-Dist: paddleocr==2.8.1; extra == "loaders"
 Requires-Dist: ftfy==6.2.3; extra == "loaders"
 Requires-Dist: librosa==0.10.1; extra == "loaders"
 Requires-Dist: XlsxWriter==3.2.0; extra == "loaders"
-Requires-Dist: xformers==0.0.27.post2; extra == "loaders"
+Requires-Dist: timm==1.0.9; extra == "loaders"
 Provides-Extra: anthropic
 Requires-Dist: langchain-anthropic==0.1.11; extra == "anthropic"
 Requires-Dist: anthropic==0.25.2; extra == "anthropic"
@@ -142,24 +142,6 @@ Requires-Dist: gradio-client==0.2.9; extra == "analytics"
 Requires-Dist: streamlit==1.37.1; extra == "analytics"
 Requires-Dist: simsimd==4.3.1; extra == "analytics"
 Requires-Dist: opencv-python==4.10.0.84; extra == "analytics"
-Provides-Extra: all
-Requires-Dist: langchain-milvus==0.1.1; extra == "all"
-Requires-Dist: milvus==2.3.5; extra == "all"
-Requires-Dist: pymilvus==2.4.4; extra == "all"
-Requires-Dist: groq==0.11.0; extra == "all"
-Requires-Dist: langchain-groq==0.1.4; extra == "all"
-Requires-Dist: llama-index-llms-huggingface==0.2.7; extra == "all"
-Requires-Dist: langchain-google-vertexai==1.0.8; extra == "all"
-Requires-Dist: langchain-google-genai==1.0.8; extra == "all"
-Requires-Dist: google-generativeai==0.7.2; extra == "all"
-Requires-Dist: vertexai==1.60.0; extra == "all"
-Requires-Dist: google-cloud-aiplatform>=1.60.0; extra == "all"
-Requires-Dist: grpc-google-iam-v1==0.13.0; extra == "all"
-Requires-Dist: langchain-openai==0.1.21; extra == "all"
-Requires-Dist: openai==1.40.8; extra == "all"
-Requires-Dist: llama-index-llms-openai==0.1.11; extra == "all"
-Requires-Dist: langchain-anthropic==0.1.23; extra == "all"
-Requires-Dist: anthropic==0.34.0; extra == "all"
 
 # AI Parrot: Python package for creating Chatbots
 This is an open-source Python package for creating Chatbots based on Langchain and Navigator.

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-parrot
-Version: 0.3.3
+Version: 0.3.5
 Summary: Live Chatbots based on Langchain chatbots and Agents Integrated into Navigator Framework or used into aiohttp applications.
 Home-page: https://github.com/phenobarbital/ai-parrot
 Author: Jesus Lara
@@ -26,7 +26,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Framework :: AsyncIO
-Requires-Python: >=3.10.12
+Requires-Python: >=3.9.20
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: Cython==3.0.11
@@ -58,11 +58,8 @@ Requires-Dist: sentence-transformers==3.0.1
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: tiktoken==0.7.0
 Requires-Dist: tokenizers==0.19.1
-Requires-Dist: unstructured==0.14.3
-Requires-Dist: unstructured-client==0.18.0
-Requires-Dist: youtube-transcript-api==0.6.2
-Requires-Dist: selenium==4.18.1
-Requires-Dist: webdriver_manager==4.0.1
+Requires-Dist: selenium>=4.18.1
+Requires-Dist: webdriver_manager>=4.0.1
 Requires-Dist: transitions==0.9.0
 Requires-Dist: sentencepiece==0.2.0
 Requires-Dist: duckduckgo-search==5.3.0
@@ -79,9 +76,12 @@ Requires-Dist: mediawikiapi==1.2
 Requires-Dist: pyowm==3.3.0
 Requires-Dist: O365==2.0.35
 Requires-Dist: stackapi==0.3.1
-Requires-Dist: timm==1.0.9
 Requires-Dist: torchvision==0.19.1
+Requires-Dist: tf-keras==2.17.0
 Provides-Extra: loaders
+Requires-Dist: unstructured==0.14.3; extra == "loaders"
+Requires-Dist: unstructured-client==0.18.0; extra == "loaders"
+Requires-Dist: youtube-transcript-api==0.6.2; extra == "loaders"
 Requires-Dist: pymupdf==1.24.4; extra == "loaders"
 Requires-Dist: pymupdf4llm==0.0.1; extra == "loaders"
 Requires-Dist: pdf4llm==0.0.6; extra == "loaders"
@@ -107,7 +107,7 @@ Requires-Dist: paddleocr==2.8.1; extra == "loaders"
 Requires-Dist: ftfy==6.2.3; extra == "loaders"
 Requires-Dist: librosa==0.10.1; extra == "loaders"
 Requires-Dist: XlsxWriter==3.2.0; extra == "loaders"
-Requires-Dist: xformers==0.0.27.post2; extra == "loaders"
+Requires-Dist: timm==1.0.9; extra == "loaders"
 Provides-Extra: anthropic
 Requires-Dist: langchain-anthropic==0.1.11; extra == "anthropic"
 Requires-Dist: anthropic==0.25.2; extra == "anthropic"
@@ -142,24 +142,6 @@ Requires-Dist: gradio-client==0.2.9; extra == "analytics"
 Requires-Dist: streamlit==1.37.1; extra == "analytics"
 Requires-Dist: simsimd==4.3.1; extra == "analytics"
 Requires-Dist: opencv-python==4.10.0.84; extra == "analytics"
-Provides-Extra: all
-Requires-Dist: langchain-milvus==0.1.1; extra == "all"
-Requires-Dist: milvus==2.3.5; extra == "all"
-Requires-Dist: pymilvus==2.4.4; extra == "all"
-Requires-Dist: groq==0.11.0; extra == "all"
-Requires-Dist: langchain-groq==0.1.4; extra == "all"
-Requires-Dist: llama-index-llms-huggingface==0.2.7; extra == "all"
-Requires-Dist: langchain-google-vertexai==1.0.8; extra == "all"
-Requires-Dist: langchain-google-genai==1.0.8; extra == "all"
-Requires-Dist: google-generativeai==0.7.2; extra == "all"
-Requires-Dist: vertexai==1.60.0; extra == "all"
-Requires-Dist: google-cloud-aiplatform>=1.60.0; extra == "all"
-Requires-Dist: grpc-google-iam-v1==0.13.0; extra == "all"
-Requires-Dist: langchain-openai==0.1.21; extra == "all"
-Requires-Dist: openai==1.40.8; extra == "all"
-Requires-Dist: llama-index-llms-openai==0.1.11; extra == "all"
-Requires-Dist: langchain-anthropic==0.1.23; extra == "all"
-Requires-Dist: anthropic==0.34.0; extra == "all"
 
 # AI Parrot: Python package for creating Chatbots
 This is an open-source Python package for creating Chatbots based on Langchain and Navigator.

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/SOURCES.txt
@@ -22,7 +22,12 @@ ai_parrot.egg-info/SOURCES.txt
 ai_parrot.egg-info/dependency_links.txt
 ai_parrot.egg-info/requires.txt
 ai_parrot.egg-info/top_level.txt
+documents/AR_Certification_Skill_Practice_Scorecard_EXAMPLE.pdf
+documents/Day 1_Essentials_AR_PPT.pdf
 documents/ex-code-loaders.txt
+documents/video_2024-09-11_19-43-58.mp3
+documents/video_2024-09-11_19-43-58.mp4
+documents/video_2024-09-11_19-43-58.vtt
 etc/navigator-ssl.ini
 etc/navigator.ini
 etc/ssl/domain.ext
@@ -32,7 +37,9 @@ etc/ssl/navigator.local.key
 etc/ssl/rootCA.crt
 etc/ssl/rootCA.key
 etc/ssl/rootCA.srl
+examples/analyze_video.py
 examples/check_bot.py
+examples/extract_frames.py
 examples/load_pdf.py
 examples/test_bot.py
 examples/test_question.py

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/ai_parrot.egg-info/requires.txt
@@ -27,11 +27,8 @@ sentence-transformers==3.0.1
 tabulate==0.9.0
 tiktoken==0.7.0
 tokenizers==0.19.1
-unstructured==0.14.3
-unstructured-client==0.18.0
-youtube-transcript-api==0.6.2
-selenium==4.18.1
-webdriver_manager==4.0.1
+selenium>=4.18.1
+webdriver_manager>=4.0.1
 transitions==0.9.0
 sentencepiece==0.2.0
 duckduckgo-search==5.3.0
@@ -48,27 +45,8 @@ mediawikiapi==1.2
 pyowm==3.3.0
 O365==2.0.35
 stackapi==0.3.1
-timm==1.0.9
 torchvision==0.19.1
-
-[all]
-langchain-milvus==0.1.1
-milvus==2.3.5
-pymilvus==2.4.4
-groq==0.11.0
-langchain-groq==0.1.4
-llama-index-llms-huggingface==0.2.7
-langchain-google-vertexai==1.0.8
-langchain-google-genai==1.0.8
-google-generativeai==0.7.2
-vertexai==1.60.0
-google-cloud-aiplatform>=1.60.0
-grpc-google-iam-v1==0.13.0
-langchain-openai==0.1.21
-openai==1.40.8
-llama-index-llms-openai==0.1.11
-langchain-anthropic==0.1.23
-anthropic==0.34.0
+tf-keras==2.17.0
 
 [analytics]
 annoy==1.17.3
@@ -100,6 +78,9 @@ langchain-groq==0.1.9
 llama-index-llms-huggingface==0.2.7
 
 [loaders]
+unstructured==0.14.3
+unstructured-client==0.18.0
+youtube-transcript-api==0.6.2
 pymupdf==1.24.4
 pymupdf4llm==0.0.1
 pdf4llm==0.0.6
@@ -125,7 +106,7 @@ paddleocr==2.8.1
 ftfy==6.2.3
 librosa==0.10.1
 XlsxWriter==3.2.0
-xformers==0.0.27.post2
+timm==1.0.9
 
 [milvus]
 langchain-milvus>=0.1.4

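A note on the dependency reshuffle above: unstructured, unstructured-client and youtube-transcript-api now sit behind the "loaders" extra and the catch-all "all" extra is gone, so installs that previously relied on [all] need their extras spelled out. A minimal sketch, assuming the extras named in this release's metadata and in the Makefile comment (loaders, google, milvus, groq, analytics) are the ones actually wanted:

    # hypothetical install command; pick only the extras your deployment needs
    pip install -e ".[loaders,google,milvus,groq,analytics]"
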
{ai_parrot-0.3.3 → ai_parrot-0.3.5}/app.py
@@ -3,7 +3,7 @@ from navigator.handlers.types import AppHandler
 from navigator.background import BackgroundQueue
 from navigator_auth import AuthHandler
 from parrot.manager import ChatbotManager
-from parrot.loaders.handlers import DataManagement
+# from parrot.loaders.handlers import DataManagement
 from parrot.conf import STATIC_DIR
 from parrot.handlers.bots import (
     FeedbackTypeHandler,
@@ -53,7 +53,7 @@ class Main(AppHandler):
             ChatbotSharingQuestion
         )
         # Management APIs:
-        DataManagement.configure(self.app)
+        # DataManagement.configure(self.app)
 
 
     async def on_prepare(self, request, response):

ai_parrot-0.3.5/documents/video_2024-09-11_19-43-58.vtt (new file)
@@ -0,0 +1,122 @@
+WEBVTT
+
+1
+00:00:00.000 --> 00:00:09.320
+Hi, I'm Kyle from the Bose Support Team and here on the Bose Team we take merchandising
+
+2
+00:00:09.320 --> 00:00:11.980
+and brand advocacy to the next level.
+
+3
+00:00:11.980 --> 00:00:16.540
+Every visit starts with arriving at the location and greeting an associate by the door.
+
+4
+00:00:16.540 --> 00:00:19.000
+We move on to the merchandising side of our visit.
+
+5
+00:00:19.000 --> 00:00:23.440
+We head over to our displays to ensure that they're clean, bright and fully stocked.
+
+6
+00:00:23.440 --> 00:00:27.800
+After our displays are clean and bright, we need to test the functionality of these displays. We go
+
+7
+00:00:27.800 --> 00:00:32.220
+through each display ensuring that the demo is functioning as intended. If we
+
+8
+00:00:32.220 --> 00:00:36.980
+run into an issue, we troubleshoot immediately or we call the call center
+
+9
+00:00:36.980 --> 00:00:41.300
+for further support. This can be more in-depth troubleshooting or to order
+
+10
+00:00:41.300 --> 00:00:48.400
+parts that are needed to resolve the issue. These can be simple fixes such as installing a wire, swapping out the media card, or even
+
+11
+00:00:48.400 --> 00:00:49.400
+a simple reboot.
+
+12
+00:00:49.400 --> 00:00:54.680
+They can even be more advanced fixes such as entire display rewiring, product changeouts,
+
+13
+00:00:54.680 --> 00:00:56.920
+or even full display changeouts.
+
+14
+00:00:56.920 --> 00:01:01.280
+Our ticketing system houses all of our tickets and allows for reps to properly manage their
+
+15
+00:01:01.280 --> 00:01:05.000
+market opening, closing tickets, as well as seeing when parts
+
+16
+00:01:05.000 --> 00:01:06.280
+have been shipped or delivered.
+
+17
+00:01:06.280 --> 00:01:11.120
+Once we've ensured that our displays are properly merchandised and fully functional,
+
+18
+00:01:11.120 --> 00:01:13.720
+we move on to the brand advocacy part of our visit.
+
+19
+00:01:13.720 --> 00:01:17.520
+We do this in several ways, by building the relationship with the store, this is done
+
+20
+00:01:17.520 --> 00:01:22.700
+through routine check-ins with staff and management in the field to cover common concerns or questions
+
+21
+00:01:22.700 --> 00:01:28.160
+that they may have, as well as fill them in on some new features that they may not know. Training events such
+
+22
+00:01:28.160 --> 00:01:32.880
+as morning huddles, lunch and learns, or customized training experiences where we
+
+23
+00:01:32.880 --> 00:01:37.360
+can take a full team sit down and do some in-depth training. Demoing new
+
+24
+00:01:37.360 --> 00:01:42.040
+products letting both customers and store associates get a hands-on
+
+25
+00:01:42.040 --> 00:01:49.640
+experience with a newly released product and promotion events where we have an opportunity to get on the front line and practice what we preach,
+
+26
+00:01:49.640 --> 00:01:51.780
+demoing and selling them ourselves.
+
+27
+00:01:51.780 --> 00:01:57.680
+Through these regular engagements, we are able to impact not just a store but an entire
+
+28
+00:01:57.680 --> 00:01:58.680
+market.
+
+29
+00:01:58.680 --> 00:02:02.960
+This is just a quick walkthrough of what we do day to day here at the Bose program.
+
+30
+00:02:02.960 --> 00:02:05.000
+I hope you enjoyed and have a great day.
+

ai_parrot-0.3.5/examples/analyze_video.py (new file)
@@ -0,0 +1,33 @@
+import asyncio
+from navconfig import BASE_DIR
+from parrot.llms.vertex import VertexLLM
+from parrot.loaders.videolocal import (
+    VideoLocalLoader
+)
+
+
+
+
+async def process_video(doc):
+    llm = VertexLLM(
+        model='gemini-1.5-pro',
+        temperature=0.1,
+        top_k=30,
+        Top_p=0.5,
+    )
+    print(':: Processing: ', doc)
+    loader = VideoLocalLoader(
+        doc,
+        source_type=f"Video {doc.name}",
+        llm=llm.get_llm(),
+        language="en"
+    )
+    docs = loader.extract()
+    print('DOCS > ', docs)
+
+
+if __name__ == '__main__':
+    doc = BASE_DIR.joinpath('documents', 'video_2024-09-11_19-43-58.mp4')
+    asyncio.run(
+        process_video(doc)
+    )

ai_parrot-0.3.5/examples/extract_frames.py (new file)
@@ -0,0 +1,74 @@
+import os
+from pathlib import PurePath, Path
+import cv2
+from navconfig import BASE_DIR
+
+def better_resolution(image_path, output_path):
+    # Load the super-resolution model (example using the EDSR model)
+    sr = cv2.dnn_superres.DnnSuperResImpl_create()
+
+    # Read the model
+    path_to_model = 'EDSR_x4.pb'
+    # You need to download this model from OpenCV's model zoo
+    sr.readModel(path_to_model)
+
+    # Set the model and scale
+    sr.setModel("edsr", 4)  # EDSR model with 4x upscaling
+
+    # Read the input image
+    image = cv2.imread(image_path)
+
+    # Upscale the image
+    upscaled_image = sr.upsample(image)
+
+    # Save the result
+    cv2.imwrite(output_path, upscaled_image)
+    print(f"Saved super-resolution image: {output_path}")
+
+def extract_frames(
+    video_path,
+    output_dir: PurePath,
+    interval=5,
+    upscale_factor=2
+):
+    if not output_dir.exists():
+        output_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
+
+    cap = cv2.VideoCapture(str(video_path))
+
+    # Get frames per second (fps) of the video
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frame_interval = int(fps * interval)
+
+    frame_count = 0
+    success, frame = cap.read()
+
+    while success:
+        if frame_count % frame_interval == 0:
+            # Get the original dimensions
+            height, width = frame.shape[:2]
+            # Upscale the frame by the given factor
+            frame_upscaled = cv2.resize(
+                frame,
+                (width * upscale_factor, height * upscale_factor),
+                interpolation=cv2.INTER_CUBIC
+            )
+            frame_name = f"frame_{frame_count}.jpg"
+            upscaled_name = f"frame_{frame_count}_upscaled.jpg"
+            frame_path = os.path.join(output_dir, frame_name)
+            upscaled_path = output_dir.joinpath(upscaled_name)
+            cv2.imwrite(frame_path, frame_upscaled)
+            # better_resolution(frame_path, upscaled_path)
+            print(f"Extracted {frame_name}")
+
+        frame_count += 1
+        success, frame = cap.read()
+
+    cap.release()
+    print("Finished extracting frames.")
+
+# Usage
+if __name__ == '__main__':
+    video_file = BASE_DIR.joinpath('documents', 'video_2024-09-11_19-43-58.mp4')
+    output_folder = BASE_DIR.joinpath('documents', 'extracted_frames')
+    extract_frames(video_file, output_folder, interval=2)

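A side note on the new extract_frames.py example above: the commented-out better_resolution() path relies on cv2.dnn_superres, which, as far as we can tell, ships with the opencv-contrib-python build rather than the opencv-python pin in the analytics extra, and it also expects an EDSR_x4.pb model downloaded separately from OpenCV's model zoo; neither is bundled with this release. A hedged prerequisite sketch for anyone wanting to enable that path:

    # assumption: the contrib wheel provides cv2.dnn_superres; EDSR_x4.pb still has to be fetched manually
    pip install opencv-contrib-python
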
{ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/load_pdf.py
@@ -7,13 +7,14 @@ from parrot.loaders import (
 
 async def process_pdf():
     llm = VertexLLM(
-        model='gemini-1.5-flash-001',
+        model='gemini-1.5-pro',
         temperature=0.1,
         top_k=30,
         Top_p=0.5,
     )
     # Add LLM
-    doc = BASE_DIR.joinpath('documents', 'AR_Certification_Skill_Practice_Scorecard_EXAMPLE.pdf')
+    # doc = BASE_DIR.joinpath('documents', 'AR_Certification_Skill_Practice_Scorecard_EXAMPLE.pdf')
+    doc = BASE_DIR.joinpath('documents', 'Day 1_Essentials_AR_PPT.pdf')
     print(':: Processing: ', doc)
     # PDF Files
     loader = PDFLoader(

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/examples/test_bot.py
@@ -13,7 +13,7 @@ async def get_agent():
     # Top_p=0.6,
     # )
     llm = VertexLLM(
-        model='gemini-1.5-flash-001',
+        model='gemini-1.5-pro',
         temperature=0.1,
         top_k=30,
         Top_p=0.5,

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/pdf.py
@@ -49,14 +49,6 @@ class PDFLoader(BasePDF):
         self.parse_images = kwargs.get('parse_images', False)
         self.page_as_images = kwargs.get('page_as_images', False)
         if self.page_as_images is True:
-            # # Load the processor and model from Hugging Face
-            # self.image_processor = DonutProcessor.from_pretrained(
-            #     "naver-clova-ix/donut-base-finetuned-docvqa"
-            # )
-            # self.image_model = VisionEncoderDecoderModel.from_pretrained(
-            #     "naver-clova-ix/donut-base-finetuned-docvqa",
-
-            # )
             # Load the processor and model from Hugging Face
             self.image_processor = LayoutLMv3Processor.from_pretrained(
                 "microsoft/layoutlmv3-base",
@@ -388,6 +380,7 @@ class PDFLoader(BasePDF):
             # TODO passing the image to a AI visual to get explanation
             # Get the extracted text from the image
             text = self.extract_page_text(img_path)
+            print('TEXT EXTRACTED >> ', text)
             url = f'/static/images/{img_name}'
             image_meta = {
                 "url": url,

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/loaders/videolocal.py
@@ -26,14 +26,15 @@ class VideoLocalLoader(BaseVideoLoader):
 
     def load_video(self, path: PurePath) -> list:
         metadata = {
-            "source": f"{path}",
             "url": f"{path.name}",
-            "index": path.stem,
+            "source": f"{path}",
             "filename": f"{path}",
+            "index": path.stem,
             "question": '',
             "answer": '',
             'type': 'video_transcript',
             "source_type": self._source_type,
+            "data": {},
             "summary": '',
             "document_meta": {
                 "language": self._language,

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/parrot/version.py
@@ -3,7 +3,7 @@
 __title__ = "ai-parrot"
 __description__ = "Live Chatbots based on Langchain chatbots and Agents \
     Integrated into Navigator Framework or used into aiohttp applications."
-__version__ = "0.3.3"
+__version__ = "0.3.5"
 __author__ = "Jesus Lara"
 __author_email__ = "jesuslarag@gmail.com"
 __license__ = "MIT"

{ai_parrot-0.3.3 → ai_parrot-0.3.5}/pyproject.toml
@@ -34,7 +34,7 @@ classifiers=[
     "License :: OSI Approved :: MIT License",
 ]
 description-file = "README.md"
-requires-python = ">=3.10.12"
+requires-python = ">=3.9.20"
 
 [tool.pytest.ini_options]
 addopts = [