symbolicai 0.17.6__tar.gz → 0.18.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (265)
  1. {symbolicai-0.17.6 → symbolicai-0.18.1}/PKG-INFO +1 -1
  2. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/neurosymbolic_engine.md +10 -0
  3. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/__init__.py +1 -1
  4. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/__init__.py +4 -0
  5. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_groq.py +20 -27
  6. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/search/engine_openai.py +100 -34
  7. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/mixin/__init__.py +2 -0
  8. symbolicai-0.18.1/symai/backend/mixin/groq.py +10 -0
  9. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/components.py +18 -11
  10. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/misc/console.py +5 -5
  11. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/shellsv.py +1 -0
  12. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/utils.py +5 -5
  13. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/PKG-INFO +1 -1
  14. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/SOURCES.txt +1 -0
  15. symbolicai-0.18.1/tests/engines/search/openai_engine.py +58 -0
  16. symbolicai-0.17.6/tests/engines/search/openai_engine.py +0 -119
  17. {symbolicai-0.17.6 → symbolicai-0.18.1}/.gitbook.yaml +0 -0
  18. {symbolicai-0.17.6 → symbolicai-0.18.1}/.github/FUNDING.yml +0 -0
  19. {symbolicai-0.17.6 → symbolicai-0.18.1}/.gitignore +0 -0
  20. {symbolicai-0.17.6 → symbolicai-0.18.1}/.symai/symsh.config.json +0 -0
  21. {symbolicai-0.17.6 → symbolicai-0.18.1}/CITATION.cff +0 -0
  22. {symbolicai-0.17.6 → symbolicai-0.18.1}/Dockerfile +0 -0
  23. {symbolicai-0.17.6 → symbolicai-0.18.1}/MANIFEST.in +0 -0
  24. {symbolicai-0.17.6 → symbolicai-0.18.1}/README.md +0 -0
  25. {symbolicai-0.17.6 → symbolicai-0.18.1}/app.py +0 -0
  26. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/banner.png +0 -0
  27. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/cat.jpg +0 -0
  28. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/cat.png +0 -0
  29. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/contract_flow.png +0 -0
  30. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img1.png +0 -0
  31. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img10.png +0 -0
  32. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img2.png +0 -0
  33. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img3.png +0 -0
  34. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img4.png +0 -0
  35. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img5.png +0 -0
  36. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img6.png +0 -0
  37. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img7.png +0 -0
  38. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img8.png +0 -0
  39. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/img9.png +0 -0
  40. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/preview.gif +0 -0
  41. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/screen1.jpeg +0 -0
  42. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/symai_logo.png +0 -0
  43. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/symsh.png +0 -0
  44. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid1.png +0 -0
  45. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid2.png +0 -0
  46. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid3.png +0 -0
  47. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid4.png +0 -0
  48. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid5.png +0 -0
  49. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/images/vid6.png +0 -0
  50. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/results/news.html +0 -0
  51. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/results/news.png +0 -0
  52. {symbolicai-0.17.6 → symbolicai-0.18.1}/assets/results/news_prev.png +0 -0
  53. {symbolicai-0.17.6 → symbolicai-0.18.1}/bin/install.ps1 +0 -0
  54. {symbolicai-0.17.6 → symbolicai-0.18.1}/bin/install.sh +0 -0
  55. {symbolicai-0.17.6 → symbolicai-0.18.1}/build.py +0 -0
  56. {symbolicai-0.17.6 → symbolicai-0.18.1}/docker-compose.yml +0 -0
  57. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/clip_engine.md +0 -0
  58. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/custom_engine.md +0 -0
  59. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/drawing_engine.md +0 -0
  60. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/file_engine.md +0 -0
  61. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/indexing_engine.md +0 -0
  62. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/local_engine.md +0 -0
  63. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/ocr_engine.md +0 -0
  64. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/search_engine.md +0 -0
  65. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/speech_to_text_engine.md +0 -0
  66. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/symbolic_engine.md +0 -0
  67. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/ENGINES/webscraping_engine.md +0 -0
  68. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/contracts.md +0 -0
  69. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/error_handling.md +0 -0
  70. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/expressions.md +0 -0
  71. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/import.md +0 -0
  72. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/operations.md +0 -0
  73. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/FEATURES/primitives.md +0 -0
  74. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/INSTALLATION.md +0 -0
  75. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/INTRODUCTION.md +0 -0
  76. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/LICENSE +0 -0
  77. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/QUICKSTART.md +0 -0
  78. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/SUMMARY.md +0 -0
  79. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TOOLS/chatbot.md +0 -0
  80. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TOOLS/packages.md +0 -0
  81. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TOOLS/shell.md +0 -0
  82. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TUTORIALS/chatbot.md +0 -0
  83. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TUTORIALS/context.md +0 -0
  84. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TUTORIALS/data_query.md +0 -0
  85. {symbolicai-0.17.6 → symbolicai-0.18.1}/docs/source/TUTORIALS/video_tutorials.md +0 -0
  86. {symbolicai-0.17.6 → symbolicai-0.18.1}/environment.yml +0 -0
  87. {symbolicai-0.17.6 → symbolicai-0.18.1}/examples/contracts.ipynb +0 -0
  88. {symbolicai-0.17.6 → symbolicai-0.18.1}/examples/primitives.ipynb +0 -0
  89. {symbolicai-0.17.6 → symbolicai-0.18.1}/icon_converter.py +0 -0
  90. {symbolicai-0.17.6 → symbolicai-0.18.1}/installer.py +0 -0
  91. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/Basics.ipynb +0 -0
  92. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/ChatBot.ipynb +0 -0
  93. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/Conversation.ipynb +0 -0
  94. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/Indexer.ipynb +0 -0
  95. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/News.ipynb +0 -0
  96. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/Queries.ipynb +0 -0
  97. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/TTS_Persona.ipynb +0 -0
  98. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/Lean engine.png +0 -0
  99. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/a_star.txt +0 -0
  100. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/abstract.py +0 -0
  101. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/audio.mp3 +0 -0
  102. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/dbpedia_samples.jsonl +0 -0
  103. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/dbpedia_samples_prepared_train.jsonl +0 -0
  104. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/dbpedia_samples_prepared_valid.jsonl +0 -0
  105. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/demo.py +0 -0
  106. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/demo_strategy.py +0 -0
  107. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/docs.py +0 -0
  108. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/einsteins_puzzle.txt +0 -0
  109. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/file.json +0 -0
  110. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/lean.py +0 -0
  111. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/news.py +0 -0
  112. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/paper.pdf +0 -0
  113. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/paper.py +0 -0
  114. {symbolicai-0.17.6 → symbolicai-0.18.1}/legacy/notebooks/examples/sql.py +0 -0
  115. {symbolicai-0.17.6 → symbolicai-0.18.1}/public/eai.svg +0 -0
  116. {symbolicai-0.17.6 → symbolicai-0.18.1}/pyproject.toml +0 -0
  117. {symbolicai-0.17.6 → symbolicai-0.18.1}/pytest.ini +0 -0
  118. {symbolicai-0.17.6 → symbolicai-0.18.1}/setup.cfg +0 -0
  119. {symbolicai-0.17.6 → symbolicai-0.18.1}/setup.py +0 -0
  120. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/TERMS_OF_SERVICE.md +0 -0
  121. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/__init__.py +0 -0
  122. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/base.py +0 -0
  123. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/driver/webclient.py +0 -0
  124. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/__init__.py +0 -0
  125. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/drawing/engine_bfl.py +0 -0
  126. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/drawing/engine_gpt_image.py +0 -0
  127. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/embedding/engine_llama_cpp.py +0 -0
  128. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/embedding/engine_openai.py +0 -0
  129. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/embedding/engine_plugin_embeddings.py +0 -0
  130. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/execute/engine_python.py +0 -0
  131. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/files/engine_io.py +0 -0
  132. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/imagecaptioning/engine_blip2.py +0 -0
  133. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +0 -0
  134. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/index/engine_pinecone.py +0 -0
  135. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/index/engine_vectordb.py +0 -0
  136. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/lean/engine_lean4.py +0 -0
  137. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +0 -0
  138. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +0 -0
  139. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +0 -0
  140. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +0 -0
  141. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_huggingface.py +0 -0
  142. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_llama_cpp.py +0 -0
  143. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +0 -0
  144. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +0 -0
  145. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/ocr/engine_apilayer.py +0 -0
  146. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/output/engine_stdout.py +0 -0
  147. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/search/engine_perplexity.py +0 -0
  148. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/search/engine_serpapi.py +0 -0
  149. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/speech_to_text/engine_local_whisper.py +0 -0
  150. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/symbolic/engine_wolframalpha.py +0 -0
  151. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/text_to_speech/engine_openai.py +0 -0
  152. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/text_vision/engine_clip.py +0 -0
  153. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/userinput/engine_console.py +0 -0
  154. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/engines/webscraping/engine_requests.py +0 -0
  155. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/mixin/anthropic.py +0 -0
  156. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/mixin/deepseek.py +0 -0
  157. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/mixin/google.py +0 -0
  158. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/mixin/openai.py +0 -0
  159. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/backend/settings.py +0 -0
  160. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/chat.py +0 -0
  161. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/collect/__init__.py +0 -0
  162. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/collect/dynamic.py +0 -0
  163. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/collect/pipeline.py +0 -0
  164. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/collect/stats.py +0 -0
  165. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/constraints.py +0 -0
  166. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/core.py +0 -0
  167. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/core_ext.py +0 -0
  168. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/endpoints/__init__py +0 -0
  169. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/endpoints/api.py +0 -0
  170. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/exceptions.py +0 -0
  171. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/__init__.py +0 -0
  172. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/api_builder.py +0 -0
  173. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/arxiv_pdf_parser.py +0 -0
  174. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/bibtex_parser.py +0 -0
  175. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/conversation.py +0 -0
  176. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/document.py +0 -0
  177. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/file_merger.py +0 -0
  178. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/graph.py +0 -0
  179. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/html_style_template.py +0 -0
  180. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/__init__.py +0 -0
  181. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/blip_2.py +0 -0
  182. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/clip.py +0 -0
  183. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/console.py +0 -0
  184. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/dall_e.py +0 -0
  185. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/file.py +0 -0
  186. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/flux.py +0 -0
  187. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/gpt_image.py +0 -0
  188. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/input.py +0 -0
  189. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/llava.py +0 -0
  190. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/naive_vectordb.py +0 -0
  191. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/naive_webscraping.py +0 -0
  192. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/ocr.py +0 -0
  193. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/openai_search.py +0 -0
  194. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/perplexity.py +0 -0
  195. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/pinecone.py +0 -0
  196. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/python.py +0 -0
  197. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/serpapi.py +0 -0
  198. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/terminal.py +0 -0
  199. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/tts.py +0 -0
  200. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/whisper.py +0 -0
  201. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/interfaces/wolframalpha.py +0 -0
  202. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/metrics/__init__.py +0 -0
  203. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/metrics/similarity.py +0 -0
  204. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/os_command.py +0 -0
  205. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/packages/__init__.py +0 -0
  206. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/packages/symdev.py +0 -0
  207. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/packages/sympkg.py +0 -0
  208. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/packages/symrun.py +0 -0
  209. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/__init__.py +0 -0
  210. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/builder.py +0 -0
  211. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/dialogue.py +0 -0
  212. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/persona.py +0 -0
  213. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/research/__init__.py +0 -0
  214. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/research/yann_lecun.py +0 -0
  215. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/sales/__init__.py +0 -0
  216. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/sales/erik_james.py +0 -0
  217. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/student/__init__.py +0 -0
  218. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/personas/student/max_tenner.py +0 -0
  219. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/repo_cloner.py +0 -0
  220. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/seo_query_optimizer.py +0 -0
  221. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/solver.py +0 -0
  222. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/strategies/__init__.py +0 -0
  223. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/strategies/cot.py +0 -0
  224. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/summarizer.py +0 -0
  225. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/taypan_interpreter.py +0 -0
  226. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/extended/vectordb.py +0 -0
  227. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/formatter/__init__.py +0 -0
  228. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/formatter/emoji.pytxt +0 -0
  229. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/formatter/formatter.py +0 -0
  230. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/formatter/regex.py +0 -0
  231. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/functional.py +0 -0
  232. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/imports.py +0 -0
  233. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/interfaces.py +0 -0
  234. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/memory.py +0 -0
  235. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/menu/__init__.py +0 -0
  236. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/menu/screen.py +0 -0
  237. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/misc/__init__.py +0 -0
  238. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/misc/loader.py +0 -0
  239. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/models/__init__.py +0 -0
  240. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/models/base.py +0 -0
  241. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/models/errors.py +0 -0
  242. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/ops/__init__.py +0 -0
  243. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/ops/measures.py +0 -0
  244. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/ops/primitives.py +0 -0
  245. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/post_processors.py +0 -0
  246. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/pre_processors.py +0 -0
  247. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/processor.py +0 -0
  248. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/prompts.py +0 -0
  249. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/server/__init__.py +0 -0
  250. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/server/huggingface_server.py +0 -0
  251. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/server/llama_cpp_server.py +0 -0
  252. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/shell.py +0 -0
  253. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/strategy.py +0 -0
  254. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/symbol.py +0 -0
  255. {symbolicai-0.17.6 → symbolicai-0.18.1}/symai/symsh.md +0 -0
  256. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/dependency_links.txt +0 -0
  257. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/entry_points.txt +0 -0
  258. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/requires.txt +0 -0
  259. {symbolicai-0.17.6 → symbolicai-0.18.1}/symbolicai.egg-info/top_level.txt +0 -0
  260. {symbolicai-0.17.6 → symbolicai-0.18.1}/tests/README.md +0 -0
  261. {symbolicai-0.17.6 → symbolicai-0.18.1}/tests/data/audio.mp3 +0 -0
  262. {symbolicai-0.17.6 → symbolicai-0.18.1}/tests/data/pg1727.txt +0 -0
  263. {symbolicai-0.17.6 → symbolicai-0.18.1}/tests/engines/search/perplexity_engine.py +0 -0
  264. {symbolicai-0.17.6 → symbolicai-0.18.1}/trusted_repos.yml +0 -0
  265. {symbolicai-0.17.6 → symbolicai-0.18.1}/uv.lock +0 -0
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: symbolicai
-Version: 0.17.6
+Version: 0.18.1
 Summary: A Neurosymbolic Perspective on Large Language Models
 Author-email: Marius-Constantin Dinu <marius@extensity.ai>, Leoveanu-Condrei Claudiu <leo@extensity.ai>
 Project-URL: Homepage, https://extensity.ai
--- a/docs/source/ENGINES/neurosymbolic_engine.md
+++ b/docs/source/ENGINES/neurosymbolic_engine.md
@@ -197,6 +197,16 @@ data = json.loads(resp.value)
 # data == {"team":"Los Angeles Dodgers", "year":2020, "coach":"Dave Roberts"}
 ```
 
+### Groq JSON mode caveat
+
+Groq currently has a quirk (arguably a bug) when combining JSON Object Mode with an explicit `tool_choice: "none"`. Their API may return:
+
+```
+Error code: 400 - {'error': {'message': 'Tool choice is none, but model called a tool', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "<|constrain|>json" …'}}
+```
+
+This happens because JSON Object Mode internally invokes a JSON "constrainer" tool (`<|constrain|>json`), which collides with `tool_choice: "none"`. Groq's own docs state that JSON modes can't be mixed with tool use; here, the model implicitly "uses a tool" even though you never asked for one. As of Aug. 22, 2025, this behavior is triggered regardless of whether you set `tool_choice: "auto"` or `tool_choice: "none"`. Instructing the model via prompting not to call that tool doesn't work either.
+
 ---
 
 ## Token Counting & Truncation
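Editor's note: for context, a hypothetical repro sketch of the failing request described in the new doc section, assuming Groq's OpenAI-compatible endpoint and the `openai` Python client; the key and prompt are placeholders, not package code:

```python
# Hypothetical repro (not part of the package): JSON Object Mode combined with
# an explicit tool_choice="none" on Groq's OpenAI-compatible endpoint may
# surface the 400 "tool_use_failed" error quoted in the docs above.
import openai

client = openai.OpenAI(
    api_key="YOUR_GROQ_API_KEY",                 # placeholder
    base_url="https://api.groq.com/openai/v1",
)

resp = client.chat.completions.create(
    model="openai/gpt-oss-120b",
    messages=[{"role": "user", "content": 'Reply with {"ok": true} as JSON.'}],
    response_format={"type": "json_object"},
    tool_choice="none",  # collides with the internal <|constrain|>json tool
)
```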
--- a/symai/__init__.py
+++ b/symai/__init__.py
@@ -33,7 +33,7 @@ os.environ['TOKENIZERS_PARALLELISM'] = "false"
 # Create singleton instance
 config_manager = settings.SymAIConfig()
 
-SYMAI_VERSION = "0.17.6"
+SYMAI_VERSION = "0.18.1"
 __version__ = SYMAI_VERSION
 __root_dir__ = config_manager.config_dir
 
--- a/symai/backend/engines/neurosymbolic/__init__.py
+++ b/symai/backend/engines/neurosymbolic/__init__.py
@@ -1,11 +1,13 @@
 from ...mixin import (ANTHROPIC_CHAT_MODELS, ANTHROPIC_REASONING_MODELS,
                       DEEPSEEK_CHAT_MODELS, DEEPSEEK_REASONING_MODELS,
                       GOOGLE_CHAT_MODELS, GOOGLE_REASONING_MODELS,
+                      GROQ_CHAT_MODELS, GROQ_REASONING_MODELS,
                       OPENAI_CHAT_MODELS, OPENAI_REASONING_MODELS)
 from .engine_anthropic_claudeX_chat import ClaudeXChatEngine
 from .engine_anthropic_claudeX_reasoning import ClaudeXReasoningEngine
 from .engine_deepseekX_reasoning import DeepSeekXReasoningEngine
 from .engine_google_geminiX_reasoning import GeminiXReasoningEngine
+from .engine_groq import GroqEngine
 from .engine_openai_gptX_chat import GPTXChatEngine
 from .engine_openai_gptX_reasoning import GPTXReasoningEngine
 
@@ -17,4 +19,6 @@ ENGINE_MAPPING = {
     **{model_name: GeminiXReasoningEngine for model_name in GOOGLE_REASONING_MODELS},
     **{model_name: GPTXChatEngine for model_name in OPENAI_CHAT_MODELS},
     **{model_name: GPTXReasoningEngine for model_name in OPENAI_REASONING_MODELS},
+    **{model_name: GroqEngine for model_name in GROQ_CHAT_MODELS},
+    **{model_name: GroqEngine for model_name in GROQ_REASONING_MODELS},
 }
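Editor's note: once merged, a `groq:`-prefixed model name resolves straight to `GroqEngine` through this flat mapping; a quick sketch (the model name is taken from the new mixin introduced later in this diff):

```python
# Model-name -> engine-class resolution via the flat ENGINE_MAPPING dict.
from symai.backend.engines.neurosymbolic import ENGINE_MAPPING, GroqEngine

engine_cls = ENGINE_MAPPING["groq:openai/gpt-oss-120b"]
assert engine_cls is GroqEngine
```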
--- a/symai/backend/engines/neurosymbolic/engine_groq.py
+++ b/symai/backend/engines/neurosymbolic/engine_groq.py
@@ -29,7 +29,7 @@ class GroqEngine(Engine):
         if self.id() != 'neurosymbolic':
             return # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
         openai.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
-        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL'].replace('groq:', '')
+        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL'] # Keep the original config name to avoid confusion in downstream tasks
         self.seed = None
         self.name = self.__class__.__name__
 
@@ -49,7 +49,7 @@ class GroqEngine(Engine):
         if 'NEUROSYMBOLIC_ENGINE_API_KEY' in kwargs:
             openai.api_key = kwargs['NEUROSYMBOLIC_ENGINE_API_KEY']
         if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL'].replace('groq:', '')
+            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
         if 'seed' in kwargs:
             self.seed = kwargs['seed']
 
@@ -59,18 +59,9 @@ class GroqEngine(Engine):
     def compute_remaining_tokens(self, prompts: list) -> int:
         raise NotImplementedError("Token counting not implemented for this engine.")
 
-    def _handle_image_content(self, content: str) -> list:
-        """Handle image content by processing vision patterns and returning image file data."""
-        def extract_pattern(text):
-            pattern = r'<<vision:(.*?):>>'
-            return re.findall(pattern, text)
-        raise NotImplementedError("Image content handling not implemented for this engine.")
-
-
-    def _remove_vision_pattern(self, text: str) -> str:
-        """Remove vision patterns from text."""
-        pattern = r'<<vision:(.*?):>>'
-        return re.sub(pattern, '', text)
+    def _handle_prefix(self, model_name: str) -> str:
+        """Handle prefix for model name."""
+        return model_name.replace('groq:', '')
 
     def _extract_thinking_content(self, output: list[str]) -> tuple[str | None, list[str]]:
         """Extract thinking content from model output if present and return cleaned output."""
@@ -115,7 +106,7 @@ class GroqEngine(Engine):
             openai.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
 
             callback = self.client.chat.completions.create
-            kwargs['model'] = kwargs['model'] if 'model' in kwargs else self.model
+            kwargs['model'] = self._handle_prefix(kwargs['model']) if 'model' in kwargs else self._handle_prefix(self.model)
 
             if except_remedy is not None:
                 res = except_remedy(self, e, callback, argument)
@@ -160,12 +151,6 @@ class GroqEngine(Engine):
         if argument.prop.response_format:
             _rsp_fmt = argument.prop.response_format
             assert _rsp_fmt.get('type') is not None, 'Expected format `{ "type": "json_object" }`! We are using the OpenAI compatible API for Groq. See more here: https://console.groq.com/docs/tool-use'
-            if _rsp_fmt["type"] == "json_object":
-                # OpenAI docs:
-                # "Important: when using JSON mode, you must also instruct the model
-                # to produce JSON yourself via a system or user message"
-                # Assuming this stays true even for this engine
-                system += f'<RESPONSE_FORMAT/>\nYou are a helpful assistant designed to output JSON.\n\n'
 
         ref = argument.prop.instance
         static_ctxt, dyn_ctxt = ref.global_context
@@ -254,25 +239,33 @@ class GroqEngine(Engine):
             CustomUserWarning("If N is supplied, it must be equal to 1. We default to 1 to not crash your program.")
             n = 1
 
+        # Handle Groq JSON-mode quirk: JSON Object Mode internally uses a constrainer tool.
+        response_format = kwargs.get('response_format')
+        tool_choice = kwargs.get('tool_choice', 'auto' if kwargs.get('tools') else 'none')
+        tools = kwargs.get('tools')
+        if response_format and isinstance(response_format, dict) and response_format.get('type') == 'json_object':
+            if tool_choice in (None, 'none'): tool_choice = 'auto'
+            if tools: tools = None
+
         payload = {
             "messages": messages,
-            "model": kwargs.get('model', self.model),
+            "model": self._handle_prefix(kwargs.get('model', self.model)),
             "seed": kwargs.get('seed', self.seed),
             "max_completion_tokens": kwargs.get('max_completion_tokens'),
             "stop": kwargs.get('stop'),
-            "temperature": kwargs.get('temperature', 0.6), # Default temperature for Kimi K2 (https://huggingface.co/moonshotai/Kimi-K2-Instruct)
+            "temperature": kwargs.get('temperature', 1), # Default temperature for gpt-oss-120b
             "frequency_penalty": kwargs.get('frequency_penalty', 0),
             "presence_penalty": kwargs.get('presence_penalty', 0),
             "reasoning_effort": kwargs.get('reasoning_effort'), # Field available only for qwen3 models
             "service_tier": kwargs.get('service_tier', 'on_demand'),
             "top_p": kwargs.get('top_p', 1),
             "n": n,
-            "tools": kwargs.get('tools'),
-            "tool_choice": kwargs.get('tool_choice', 'none'),
-            "response_format": kwargs.get('response_format'),
+            "tools": tools,
+            "tool_choice": tool_choice,
+            "response_format": response_format,
         }
 
-        if not self.model.startswith('qwen'):
+        if not self._handle_prefix(self.model).startswith('qwen'):
             del payload['reasoning_effort']
 
         return payload
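Editor's note: the JSON-mode guard above, isolated as a sketch to make the rule explicit (the function name is a local stand-in, not package API):

```python
# JSON Object Mode internally uses a constrainer tool, so a tool_choice of
# None/"none" is promoted to "auto" and any explicit tools are dropped.
def adjust_for_json_mode(kwargs: dict) -> tuple[list | None, str]:
    response_format = kwargs.get('response_format')
    tools = kwargs.get('tools')
    tool_choice = kwargs.get('tool_choice', 'auto' if tools else 'none')
    if isinstance(response_format, dict) and response_format.get('type') == 'json_object':
        if tool_choice in (None, 'none'):
            tool_choice = 'auto'
        tools = None
    return tools, tool_choice

assert adjust_for_json_mode({'response_format': {'type': 'json_object'},
                             'tool_choice': 'none'}) == (None, 'auto')
```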
--- a/symai/backend/engines/search/engine_openai.py
+++ b/symai/backend/engines/search/engine_openai.py
@@ -26,7 +26,7 @@ TRACKING_KEYS = {
 
 @dataclass
 class Citation:
-    id: str
+    id: int
     title: str
     url: str
     start: int
@@ -47,10 +47,10 @@ class SearchResult(Result):
             self._value = None
             self._citations = []
             return
-        replaced_text, ordered = self._replace_links_with_citations(text, annotations, id_mode="sequential")
+        replaced_text, ordered, starts_ends = self._insert_citation_markers(text, annotations)
         self._value = replaced_text
         self._citations = [
-            Citation(id=cid, title=title, url=url, start=0, end=0)
+            Citation(id=cid, title=title, url=url, start=starts_ends[cid][0], end=starts_ends[cid][1])
             for cid, title, url in ordered
         ]
 
@@ -59,6 +59,8 @@ class SearchResult(Result):
             CustomUserWarning(f"Failed to parse response: {e}", raise_with=ValueError)
 
     def _extract_text(self, value) -> str | None:
+        if isinstance(value.get('output_text'), str) and value.get('output_text'):
+            return value.get('output_text')
         text = None
         for output in value.get('output', []):
             if output.get('type') == 'message' and output.get('content'):
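Editor's note: the short-circuit added here prefers the Responses API's top-level `output_text` convenience field when it is a non-empty string; a minimal shape sketch (a fake payload with only the fields the code above touches, not the full API schema):

```python
# Minimal fake Responses payload: when output_text is present and non-empty,
# the extractor returns it without walking the output list.
value = {"output_text": "Final answer.", "output": []}

text = None
if isinstance(value.get('output_text'), str) and value.get('output_text'):
    text = value['output_text']
assert text == "Final answer."
```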
@@ -68,19 +70,35 @@ class SearchResult(Result):
         return text
 
     def _extract_text_and_annotations(self, value):
-        text = None
-        annotations = []
-        for output in value.get('output', []):
+        segments = []
+        global_annotations = []
+        pos = 0
+        for output in value.get('output', []) or []:
             if output.get('type') != 'message' or not output.get('content'):
                 continue
             for content in output.get('content', []) or []:
-                if 'text' in content and content['text']:
-                    text = content['text']
-                anns = content.get('annotations', []) or []
-                for ann in anns:
-                    if ann.get('type') == 'url_citation':
-                        annotations.append(ann)
-        return text, annotations
+                seg_text = content.get('text') or ''
+                if not isinstance(seg_text, str):
+                    continue
+                for ann in (content.get('annotations') or []):
+                    if ann.get('type') == 'url_citation' and ann.get('url'):
+                        start = ann.get('start_index', 0)
+                        end = ann.get('end_index', 0)
+                        global_annotations.append({
+                            'type': 'url_citation',
+                            'url': ann.get('url'),
+                            'title': (ann.get('title') or '').strip(),
+                            'start_index': pos + int(start),
+                            'end_index': pos + int(end),
+                        })
+                segments.append(seg_text)
+                pos += len(seg_text)
+
+        built_text = ''.join(segments) if segments else None
+        # Prefer top-level output_text if present AND segments are empty (no way to compute indices)
+        if not built_text and isinstance(value.get('output_text'), str):
+            return value.get('output_text'), []
+        return built_text, global_annotations
 
     def _normalize_url(self, u: str) -> str:
         parts = urlsplit(u)
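Editor's note: the `pos` bookkeeping is the crux of the rewrite above. Each content segment carries annotation indices relative to itself, so they must be shifted by the combined length of all preceding segments before they can index into the concatenated text; a toy sketch with fake segments:

```python
# Toy version of the offset logic: per-segment (start, end) spans are shifted
# into positions within the concatenated text.
segments = [("Alpha. ", [(0, 5)]),           # "Alpha"
            ("Beta cited here.", [(0, 4)])]  # "Beta"

pos, text, global_spans = 0, "", []
for seg, spans in segments:
    global_spans += [(pos + s, pos + e) for s, e in spans]
    text += seg
    pos += len(seg)

assert text[global_spans[1][0]:global_spans[1][1]] == "Beta"
```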
@@ -115,48 +133,96 @@ class SearchResult(Result):
     def _short_hash_id(self, nu: str, length=6) -> str:
         return hashlib.sha1(nu.encode('utf-8')).hexdigest()[:length]
 
-    def _replace_links_with_citations(self, text: str, annotations, id_mode: str = 'sequential'):
+    def _insert_citation_markers(self, text: str, annotations):
         title_map = self._make_title_map(annotations)
-        id_map = {}
-        ordered = [] # list of ("[n]", title, normalized_url)
+        id_map: dict[str, int] = {}
+        first_span: dict[int, tuple[int, int]] = {}
+        ordered: list[tuple[int, str, str]] = [] # (id, title, normalized_url)
         next_id = 1
 
-        pattern = re.compile(r"\[([^\]]*?)\]\((https?://[^\s)]+)\)")
+        url_anns = [a for a in annotations or [] if a.get('type') == 'url_citation' and a.get('url')]
+        url_anns.sort(key=lambda a: int(a.get('start_index', 0)))
+
+        pieces: list[str] = []
+        cursor = 0
+        out_len = 0 # length of output built so far (after cleaning and prior markers)
 
-        def _get_id(nu: str) -> str:
+        def _get_id(nu: str) -> int:
             nonlocal next_id
-            if id_mode == 'hash':
-                return self._short_hash_id(nu)
             if nu not in id_map:
-                id_map[nu] = str(next_id)
-                t = title_map.get(nu) or self._hostname(nu)
-                ordered.append((f"[{id_map[nu]}]", t, nu))
+                cid = next_id
+                id_map[nu] = cid
+                title = title_map.get(nu) or self._hostname(nu)
+                ordered.append((cid, title, nu))
                 next_id += 1
             return id_map[nu]
 
-        def _repl(m):
-            link_text, url = m.group(1), m.group(2)
+        for ann in url_anns:
+            start = int(ann.get('start_index', 0))
+            end = int(ann.get('end_index', 0))
+            if end <= cursor:
+                continue # skip overlapping or backwards spans
+            url = ann.get('url')
             nu = self._normalize_url(url)
             cid = _get_id(nu)
-            title = title_map.get(nu)
-            if not title:
-                lt = (link_text or '').strip()
-                title = lt if (' ' in lt) else self._hostname(nu)
-            return f"[{cid}] ({title})"
-
-        replaced = pattern.sub(_repl, text)
-        return replaced, ordered
+            title = title_map.get(nu) or self._hostname(nu)
+
+            prefix = text[cursor:start]
+            prefix_clean = self._strip_markdown_links(prefix)
+            pieces.append(prefix_clean)
+            out_len += len(prefix_clean)
+
+            span_text = text[start:end]
+            span_clean = self._strip_markdown_links(span_text)
+            span_end_out = out_len + len(span_clean)
+            pieces.append(span_clean)
+            out_len = span_end_out
+
+            marker = f"[{cid}] ({title})\n"
+            marker_start_out = out_len
+            marker_end_out = out_len + len(marker)
+            if cid not in first_span:
+                first_span[cid] = (marker_start_out, marker_end_out)
+            pieces.append(marker)
+            out_len = marker_end_out
+            cursor = end
+
+        tail_clean = self._strip_markdown_links(text[cursor:])
+        pieces.append(tail_clean)
+        replaced = ''.join(pieces)
+
+        starts_ends = {cid: first_span.get(cid, (0, 0)) for cid, _, _ in ordered}
+        return replaced, ordered, starts_ends
+
+    def _strip_markdown_links(self, text: str) -> str:
+        # Remove ([text](http...)) including surrounding parentheses
+        pattern_paren = re.compile(r"\(\s*\[[^\]]+\]\(https?://[^)]+\)\s*\)")
+        text = pattern_paren.sub('', text)
+        # Remove bare [text](http...)
+        pattern_bare = re.compile(r"\[[^\]]+\]\(https?://[^)]+\)")
+        text = pattern_bare.sub('', text)
+        # Remove parentheses that became empty or contain only commas/whitespace like (, , )
+        pattern_empty_paren = re.compile(r"\(\s*\)")
+        text = pattern_empty_paren.sub('', text)
+        pattern_commas_only = re.compile(r"\(\s*(,\s*)+\)")
+        text = pattern_commas_only.sub('', text)
+        # Collapse potential double spaces resulting from removals
+        return re.sub(r"\s{2,}", " ", text).strip()
 
     def __str__(self) -> str:
+        if isinstance(self._value, str) and self._value:
+            return self._value
         try:
             return json.dumps(self.raw, indent=2)
         except TypeError:
             return str(self.raw)
 
     def _repr_html_(self) -> str:
+        if isinstance(self._value, str) and self._value:
+            return f"<pre>{self._value}</pre>"
         try:
             return f"<pre>{json.dumps(self.raw, indent=2)}</pre>"
-        except Exception as e:
+        except Exception:
             return f"<pre>{str(self.raw)}</pre>"
 
     def get_citations(self) -> list[Citation]:
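Editor's note: end to end, the new pass emits a `[n] (title)` marker directly after each cited span and records the marker's own start/end, which is what `Citation.start`/`Citation.end` now point at; a toy sketch of that invariant (local helper, not package API):

```python
# Toy version of the marker insertion: the recorded span selects exactly the
# marker text in the rewritten string.
def insert_marker(text: str, end: int, cid: int, title: str):
    marker = f"[{cid}] ({title})\n"
    out = text[:end] + marker + text[end:]
    return out, (end, end + len(marker))  # marker's own start/end

out, (s, e) = insert_marker("Water boils at 100 C at sea level.", 34, 1, "Boiling point")
assert out[s:e] == "[1] (Boiling point)\n"
```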
--- a/symai/backend/mixin/__init__.py
+++ b/symai/backend/mixin/__init__.py
@@ -4,5 +4,7 @@ from .deepseek import SUPPORTED_CHAT_MODELS as DEEPSEEK_CHAT_MODELS
 from .deepseek import SUPPORTED_REASONING_MODELS as DEEPSEEK_REASONING_MODELS
 from .google import SUPPORTED_CHAT_MODELS as GOOGLE_CHAT_MODELS
 from .google import SUPPORTED_REASONING_MODELS as GOOGLE_REASONING_MODELS
+from .groq import SUPPORTED_CHAT_MODELS as GROQ_CHAT_MODELS
+from .groq import SUPPORTED_REASONING_MODELS as GROQ_REASONING_MODELS
 from .openai import SUPPORTED_CHAT_MODELS as OPENAI_CHAT_MODELS
 from .openai import SUPPORTED_REASONING_MODELS as OPENAI_REASONING_MODELS
--- /dev/null
+++ b/symai/backend/mixin/groq.py
@@ -0,0 +1,10 @@
+SUPPORTED_CHAT_MODELS = [
+    "groq:moonshotai/kimi-k2-instruct"
+]
+
+SUPPORTED_REASONING_MODELS = [
+    "groq:openai/gpt-oss-120b",
+    "groq:openai/gpt-oss-20b",
+    "groq:qwen/qwen3-32b",
+    "groq:deepseek-r1-distill-llama-70b"
+]
--- a/symai/components.py
+++ b/symai/components.py
@@ -1147,8 +1147,17 @@ class MetadataTracker(Expression):
         # Note on try/except:
         # The unpacking shouldn't fail; if it fails, it's likely the API response format has changed and we need to know that ASAP
         for (_, engine_name, model_name), metadata in self._metadata.items():
-            if engine_name in ("GPTXChatEngine", "GPTXReasoningEngine"):
-                try:
+            try:
+                if engine_name == "GroqEngine":
+                    usage = metadata["raw_output"].usage
+                    token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.completion_tokens
+                    token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.prompt_tokens
+                    token_details[(engine_name, model_name)]["usage"]["total_tokens"] += usage.total_tokens
+                    token_details[(engine_name, model_name)]["usage"]["total_calls"] += 1
+                    #!: Backward compatibility for components like `RuntimeInfo`
+                    token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += 0 # Assignment not allowed with defaultdict
+                    token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += 0
+                elif engine_name in ("GPTXChatEngine", "GPTXReasoningEngine"):
                     usage = metadata["raw_output"].usage
                     token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.completion_tokens
                     token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.prompt_tokens
@@ -1160,10 +1169,7 @@ class MetadataTracker(Expression):
                     token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += usage.completion_tokens_details.reasoning_tokens
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["audio_tokens"] += usage.prompt_tokens_details.audio_tokens
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += usage.prompt_tokens_details.cached_tokens
-                except Exception as e:
-                    CustomUserWarning(f"Failed to parse metadata for {engine_name}: {e}", raise_with=AttributeError)
-            elif engine_name == "GPTXSearchEngine":
-                try:
+                elif engine_name == "GPTXSearchEngine":
                     usage = metadata["raw_output"].usage
                     token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.input_tokens
                     token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.output_tokens
@@ -1171,11 +1177,11 @@ class MetadataTracker(Expression):
                     token_details[(engine_name, model_name)]["usage"]["total_calls"] += 1
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += usage.input_tokens_details.cached_tokens
                     token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += usage.output_tokens_details.reasoning_tokens
-                except Exception as e:
-                    CustomUserWarning(f"Failed to parse metadata for {engine_name}: {e}", raise_with=AttributeError)
-            else:
-                logger.warning(f"Tracking {engine_name} is not supported.")
-                continue
+                else:
+                    logger.warning(f"Tracking {engine_name} is not supported.")
+                    continue
+            except Exception as e:
+                CustomUserWarning(f"Failed to parse metadata for {engine_name}: {e}", raise_with=AttributeError)
 
         # Convert to normal dict
         return {**token_details}
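Editor's note: the `+= 0` writes deserve a remark. `token_details` behaves like a nested defaultdict of counters, so augmented addition materializes the breakdown keys that downstream consumers (`RuntimeInfo`) now treat as mandatory while leaving the count at zero; a sketch, assuming that defaultdict shape:

```python
# Assumed shape: nested defaultdicts ending in int counters. "+= 0" still
# creates the key with a zero value via the int factory.
from collections import defaultdict

token_details = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
key = ("GroqEngine", "groq:openai/gpt-oss-120b")
token_details[key]["prompt_breakdown"]["cached_tokens"] += 0
assert token_details[key]["prompt_breakdown"]["cached_tokens"] == 0
```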
@@ -1193,6 +1199,7 @@ class MetadataTracker(Expression):
         # Skip first entry
         for (_, engine_name), metadata in list(self._metadata.items())[1:]:
             if engine_name not in ("GPTXChatEngine", "GPTXReasoningEngine", "GPTXSearchEngine"):
+                logger.warning(f"Metadata accumulation for {engine_name} is not supported. Try `.usage` instead for now.")
                 continue
 
             # Accumulate time if it exists
--- a/symai/misc/console.py
+++ b/symai/misc/console.py
@@ -2,6 +2,7 @@ import re
 import pygments
 import logging
 
+#@TODO: refactor to use rich instead of prompt_toolkit
 from html import escape as escape_html
 from pygments.lexers.python import PythonLexer
 from pygments.lexers.javascript import JavascriptLexer
@@ -49,8 +50,8 @@ class ConsoleStyle(object):
         message = str(message)
         if self.logging:
             logger.debug(message)
-        if escape:
-            message = escape_html(message)
+        # Prepare safe content for HTML printing without mutating the original
+        content_for_html = escape_html(message) if escape else message
         style = self.style_types.get(self.style_type, self.style_types['default'])
 
         if style == self.style_types['code']:
@@ -80,7 +81,6 @@ class ConsoleStyle(object):
         elif style == self.style_types['default']:
             print(message)
         elif style == self.style_types['custom']:
-            print(HTML(f'<style fg="{self.color}">{message}</style>'))
+            print(HTML(f'<style fg="{self.color}">{content_for_html}</style>'))
         else:
-            print(HTML(f'<style fg="{style}">{message}</style>'))
-
+            print(HTML(f'<style fg="{style}">{content_for_html}</style>'))
--- a/symai/shellsv.py
+++ b/symai/shellsv.py
@@ -13,6 +13,7 @@ import traceback
 from pathlib import Path
 from typing import Iterable, Tuple
 
+#@TODO: refactor to use rich instead of prompt_toolkit
 from prompt_toolkit import HTML, PromptSession, print_formatted_text
 from prompt_toolkit.completion import Completer, Completion, WordCompleter
 from prompt_toolkit.history import History
--- a/symai/utils.py
+++ b/symai/utils.py
@@ -143,7 +143,8 @@ class CustomUserWarning:
         filename = caller.filename
         filename = filename[filename.find('symbolicai'):]
         with ConsoleStyle('warn') as console:
-            console.print(f"{filename}:{lineno}: {UserWarning.__name__}: {message}")
+            # Escape content to avoid HTML parsing errors from model text like <|constrain|>JSON
+            console.print(f"{filename}:{lineno}: {UserWarning.__name__}: {message}", escape=True)
         # Always raise the warning if raise_with is provided
         if raise_with is not None:
             raise raise_with(message)
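Editor's note: why `escape=True` matters here: `prompt_toolkit`'s `HTML` parses its input eagerly, so raw model text such as `<|constrain|>json` is invalid markup and raises at construction time, while the escaped form renders as inert text. A small sketch of the assumed failure mode:

```python
# prompt_toolkit HTML() parses eagerly; unescaped model output containing
# angle brackets is invalid markup, while the escaped form prints safely.
from html import escape as escape_html
from prompt_toolkit import HTML

raw = "model called a tool: <|constrain|>json"
try:
    HTML(f'<style fg="ansiyellow">{raw}</style>')
except Exception as exc:  # invalid markup raises here
    print(f"unescaped input failed: {type(exc).__name__}")

HTML(f'<style fg="ansiyellow">{escape_html(raw)}</style>')  # parses fine
```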
@@ -224,9 +225,7 @@ class RuntimeInfo:
         try:
             return RuntimeInfo.from_usage_stats(tracker.usage, total_elapsed_time)
         except Exception as e:
-            raise e
-            CustomUserWarning(f"Failed to parse metadata; returning empty RuntimeInfo: {e}")
-            return RuntimeInfo(0, 0, 0, 0, 0, 0, 0, 0)
+            CustomUserWarning(f"Failed to parse metadata: {e}", raise_with=ValueError)
         return RuntimeInfo(0, 0, 0, 0, 0, 0, 0, 0)
 
     @staticmethod
@@ -234,12 +233,13 @@ class RuntimeInfo:
         if usage_stats is not None:
             usage_per_engine = {}
             for (engine_name, model_name), data in usage_stats.items():
+                #!: This object interacts with `MetadataTracker`; its fields are mandatory and handled there
                 data = Box(data)
                 usage_per_engine[(engine_name, model_name)] = RuntimeInfo(
                     total_elapsed_time=total_elapsed_time,
                     prompt_tokens=data.usage.prompt_tokens,
                     completion_tokens=data.usage.completion_tokens,
-                    reasoning_tokens=getattr(data.usage, 'reasoning_tokens', 0),
+                    reasoning_tokens=data.completion_breakdown.reasoning_tokens,
                     cached_tokens=data.prompt_breakdown.cached_tokens,
                     total_calls=data.usage.total_calls,
                     total_tokens=data.usage.total_tokens,
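Editor's note: the switch from `getattr(data.usage, 'reasoning_tokens', 0)` to `data.completion_breakdown.reasoning_tokens` relies on `MetadataTracker` always materializing the breakdown fields (the `+= 0` writes above). With `python-box`, that dotted access looks like this sketch (fake usage stats, not real tracker output):

```python
# Box wraps plain dicts for attribute access; the breakdown fields are
# guaranteed upstream, so no getattr fallback is needed anymore.
from box import Box

data = Box({
    "usage": {"prompt_tokens": 10, "completion_tokens": 5,
              "total_tokens": 15, "total_calls": 1},
    "prompt_breakdown": {"cached_tokens": 0},
    "completion_breakdown": {"reasoning_tokens": 0},
})
assert data.completion_breakdown.reasoning_tokens == 0
```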
--- a/symbolicai.egg-info/PKG-INFO
+++ b/symbolicai.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: symbolicai
-Version: 0.17.6
+Version: 0.18.1
 Summary: A Neurosymbolic Perspective on Large Language Models
 Author-email: Marius-Constantin Dinu <marius@extensity.ai>, Leoveanu-Condrei Claudiu <leo@extensity.ai>
 Project-URL: Homepage, https://extensity.ai
--- a/symbolicai.egg-info/SOURCES.txt
+++ b/symbolicai.egg-info/SOURCES.txt
@@ -167,6 +167,7 @@ symai/backend/mixin/__init__.py
 symai/backend/mixin/anthropic.py
 symai/backend/mixin/deepseek.py
 symai/backend/mixin/google.py
+symai/backend/mixin/groq.py
 symai/backend/mixin/openai.py
 symai/collect/__init__.py
 symai/collect/dynamic.py
--- /dev/null
+++ b/tests/engines/search/openai_engine.py
@@ -0,0 +1,58 @@
+import os
+import re
+
+import pytest
+
+from symai.backend.engines.search.engine_openai import GPTXSearchEngine
+from symai.backend.settings import SYMAI_CONFIG
+from symai.extended.interfaces.openai_search import openai_search
+from symai.functional import EngineRepository
+
+
+def _get_api_key():
+    return (
+        os.environ.get("OPENAI_API_KEY")
+        or SYMAI_CONFIG.get("SEARCH_ENGINE_API_KEY")
+        or os.environ.get("SEARCH_ENGINE_API_KEY")
+    )
+
+
+@pytest.mark.parametrize("model", ["gpt-4.1-mini", "gpt-5-mini"])
+def test_openai_search_citations_and_formatting_live(model):
+    api_key = _get_api_key()
+    if not api_key:
+        pytest.skip("OPENAI_API_KEY/SEARCH_ENGINE_API_KEY not set; live test skipped")
+
+    # Register a fresh engine instance with the provided API key and target model
+    engine = GPTXSearchEngine(api_key=api_key, model=model)
+    EngineRepository.register("search", engine, allow_engine_override=True)
+
+    # Keep the query stable but realistic to elicit citations
+    query = "President of Romania 2025 inauguration timeline and partner (with citations)"
+    search = openai_search()
+    res = search(query, model=model, search_context_size="medium")
+
+    # 1) No leftover markdown link patterns or empty parentheses artifacts
+    assert not re.search(r"\[[^\]]+\]\(https?://[^)]+\)", res.value)
+    assert "(, , )" not in res.value
+    assert "()" not in res.value
+
+    # 2) Citations exist with integer ids and normalized URLs (no utm_ params)
+    citations = res.get_citations()
+    assert isinstance(citations, list) and len(citations) >= 1
+    seen_ids = set()
+    for c in citations:
+        assert isinstance(c.id, int)
+        assert c.id not in seen_ids
+        seen_ids.add(c.id)
+        assert "utm_" not in c.url
+
+        # Slice should match the marker format; allow small whitespace variance before newline
+        slice_text = res.value[c.start:c.end]
+        assert slice_text.startswith(f"[{c.id}] (")
+        assert slice_text.endswith(")\n")
+        # Optional stronger check including title
+        assert slice_text == f"[{c.id}] ({c.title})\n"
+
+    # 3) Formatting: at least one marker pattern with newline is present
+    assert re.search(r"\[\d+\] \([^)]+\)\n", res.value)