abstractcore 2.5.2.tar.gz → 2.6.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. {abstractcore-2.5.2 → abstractcore-2.6.0}/PKG-INFO +207 -8
  2. {abstractcore-2.5.2 → abstractcore-2.6.0}/README.md +191 -2
  3. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/__init__.py +19 -1
  4. abstractcore-2.6.0/abstractcore/architectures/detection.py +542 -0
  5. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/assets/architecture_formats.json +14 -1
  6. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/assets/model_capabilities.json +533 -10
  7. abstractcore-2.6.0/abstractcore/compression/__init__.py +29 -0
  8. abstractcore-2.6.0/abstractcore/compression/analytics.py +420 -0
  9. abstractcore-2.6.0/abstractcore/compression/cache.py +250 -0
  10. abstractcore-2.6.0/abstractcore/compression/config.py +279 -0
  11. abstractcore-2.6.0/abstractcore/compression/exceptions.py +30 -0
  12. abstractcore-2.6.0/abstractcore/compression/glyph_processor.py +381 -0
  13. abstractcore-2.6.0/abstractcore/compression/optimizer.py +388 -0
  14. abstractcore-2.6.0/abstractcore/compression/orchestrator.py +380 -0
  15. abstractcore-2.6.0/abstractcore/compression/pil_text_renderer.py +818 -0
  16. abstractcore-2.6.0/abstractcore/compression/quality.py +226 -0
  17. abstractcore-2.6.0/abstractcore/compression/text_formatter.py +666 -0
  18. abstractcore-2.6.0/abstractcore/compression/vision_compressor.py +371 -0
  19. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/config/main.py +64 -0
  20. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/config/manager.py +100 -5
  21. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/retry.py +2 -2
  22. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/session.py +193 -7
  23. abstractcore-2.6.0/abstractcore/download.py +253 -0
  24. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/embeddings/manager.py +2 -2
  25. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/events/__init__.py +113 -2
  26. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/exceptions/__init__.py +49 -2
  27. abstractcore-2.6.0/abstractcore/media/auto_handler.py +657 -0
  28. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/handlers/local_handler.py +14 -2
  29. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/handlers/openai_handler.py +62 -3
  30. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/processors/__init__.py +11 -1
  31. abstractcore-2.6.0/abstractcore/media/processors/direct_pdf_processor.py +210 -0
  32. abstractcore-2.6.0/abstractcore/media/processors/glyph_pdf_processor.py +227 -0
  33. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/processors/image_processor.py +7 -1
  34. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/processors/office_processor.py +2 -2
  35. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/processors/text_processor.py +18 -3
  36. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/types.py +164 -7
  37. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/utils/image_scaler.py +2 -2
  38. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/vision_fallback.py +2 -2
  39. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/__init__.py +18 -0
  40. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/anthropic_provider.py +228 -8
  41. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/base.py +378 -11
  42. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/huggingface_provider.py +563 -23
  43. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/lmstudio_provider.py +284 -4
  44. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/mlx_provider.py +27 -2
  45. abstractcore-2.6.0/abstractcore/providers/model_capabilities.py +352 -0
  46. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/ollama_provider.py +282 -6
  47. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/openai_provider.py +286 -8
  48. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/registry.py +85 -13
  49. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/providers/streaming.py +2 -2
  50. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/server/app.py +91 -81
  51. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/common_tools.py +2 -2
  52. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/handler.py +2 -2
  53. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/parser.py +2 -2
  54. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/registry.py +2 -2
  55. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/syntax_rewriter.py +2 -2
  56. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/tag_rewriter.py +3 -3
  57. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/__init__.py +4 -1
  58. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/self_fixes.py +2 -2
  59. abstractcore-2.6.0/abstractcore/utils/trace_export.py +287 -0
  60. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/version.py +1 -1
  61. abstractcore-2.6.0/abstractcore/utils/vlm_token_calculator.py +655 -0
  62. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/PKG-INFO +207 -8
  63. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/SOURCES.txt +19 -53
  64. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/requires.txt +19 -5
  65. {abstractcore-2.5.2 → abstractcore-2.6.0}/pyproject.toml +24 -5
  66. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_retry_strategy.py +24 -9
  67. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_unload_memory.py +4 -4
  68. abstractcore-2.5.2/abstractcore/architectures/detection.py +0 -296
  69. abstractcore-2.5.2/abstractcore/media/auto_handler.py +0 -363
  70. abstractcore-2.5.2/tests/test_all_specified_providers.py +0 -182
  71. abstractcore-2.5.2/tests/test_basic_summarizer.py +0 -332
  72. abstractcore-2.5.2/tests/test_cli_media.py +0 -45
  73. abstractcore-2.5.2/tests/test_consistency.py +0 -116
  74. abstractcore-2.5.2/tests/test_critical_streaming_tool_fix.py +0 -744
  75. abstractcore-2.5.2/tests/test_debug_server.py +0 -69
  76. abstractcore-2.5.2/tests/test_direct_vs_server.py +0 -157
  77. abstractcore-2.5.2/tests/test_embeddings.py +0 -391
  78. abstractcore-2.5.2/tests/test_embeddings_integration.py +0 -317
  79. abstractcore-2.5.2/tests/test_embeddings_llm_integration.py +0 -331
  80. abstractcore-2.5.2/tests/test_embeddings_matrix_operations.py +0 -387
  81. abstractcore-2.5.2/tests/test_embeddings_no_mock.py +0 -358
  82. abstractcore-2.5.2/tests/test_embeddings_real.py +0 -428
  83. abstractcore-2.5.2/tests/test_embeddings_semantic_validation.py +0 -438
  84. abstractcore-2.5.2/tests/test_embeddings_simple.py +0 -124
  85. abstractcore-2.5.2/tests/test_fixed_media.py +0 -168
  86. abstractcore-2.5.2/tests/test_fixed_prompt.py +0 -65
  87. abstractcore-2.5.2/tests/test_lmstudio_context.py +0 -143
  88. abstractcore-2.5.2/tests/test_media_import.py +0 -65
  89. abstractcore-2.5.2/tests/test_media_server.py +0 -276
  90. abstractcore-2.5.2/tests/test_ollama_tool_role_fix.py +0 -269
  91. abstractcore-2.5.2/tests/test_openai_conversion_manual.py +0 -119
  92. abstractcore-2.5.2/tests/test_openai_format_bug.py +0 -257
  93. abstractcore-2.5.2/tests/test_openai_format_conversion.py +0 -485
  94. abstractcore-2.5.2/tests/test_openai_media_integration.py +0 -444
  95. abstractcore-2.5.2/tests/test_progressive_complexity.py +0 -163
  96. abstractcore-2.5.2/tests/test_provider_basic_session.py +0 -154
  97. abstractcore-2.5.2/tests/test_provider_connectivity.py +0 -102
  98. abstractcore-2.5.2/tests/test_provider_simple_generation.py +0 -167
  99. abstractcore-2.5.2/tests/test_provider_streaming.py +0 -364
  100. abstractcore-2.5.2/tests/test_provider_token_translation.py +0 -289
  101. abstractcore-2.5.2/tests/test_provider_tool_detection.py +0 -265
  102. abstractcore-2.5.2/tests/test_providers.py +0 -362
  103. abstractcore-2.5.2/tests/test_providers_comprehensive.py +0 -353
  104. abstractcore-2.5.2/tests/test_providers_simple.py +0 -168
  105. abstractcore-2.5.2/tests/test_real_models_comprehensive.py +0 -521
  106. abstractcore-2.5.2/tests/test_server_debug.py +0 -131
  107. abstractcore-2.5.2/tests/test_server_embeddings_real.py +0 -233
  108. abstractcore-2.5.2/tests/test_server_integration.py +0 -245
  109. abstractcore-2.5.2/tests/test_stream_tool_calling.py +0 -496
  110. abstractcore-2.5.2/tests/test_streaming_enhancements.py +0 -614
  111. abstractcore-2.5.2/tests/test_streaming_tag_rewriting.py +0 -527
  112. abstractcore-2.5.2/tests/test_structured_integration.py +0 -218
  113. abstractcore-2.5.2/tests/test_structured_output.py +0 -332
  114. abstractcore-2.5.2/tests/test_syntax_rewriter.py +0 -471
  115. abstractcore-2.5.2/tests/test_tool_calling.py +0 -231
  116. abstractcore-2.5.2/tests/test_tool_execution_separation.py +0 -857
  117. abstractcore-2.5.2/tests/test_unified_streaming.py +0 -852
  118. abstractcore-2.5.2/tests/test_vision_accuracy.py +0 -145
  119. abstractcore-2.5.2/tests/test_vision_comprehensive.py +0 -648
  120. abstractcore-2.5.2/tests/test_vision_fallback_improvement.py +0 -129
  121. abstractcore-2.5.2/tests/test_wrong_model_fallback.py +0 -262
  122. {abstractcore-2.5.2 → abstractcore-2.6.0}/LICENSE +0 -0
  123. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/__init__.py +0 -0
  124. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/__main__.py +0 -0
  125. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/app_config_utils.py +0 -0
  126. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/deepsearch.py +0 -0
  127. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/extractor.py +0 -0
  128. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/intent.py +0 -0
  129. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/judge.py +0 -0
  130. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/apps/summarizer.py +0 -0
  131. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/architectures/__init__.py +0 -0
  132. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/architectures/enums.py +0 -0
  133. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/assets/session_schema.json +0 -0
  134. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/config/__init__.py +0 -0
  135. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/config/vision_config.py +0 -0
  136. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/__init__.py +0 -0
  137. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/enums.py +0 -0
  138. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/factory.py +0 -0
  139. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/interface.py +0 -0
  140. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/core/types.py +0 -0
  141. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/embeddings/__init__.py +0 -0
  142. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/embeddings/models.py +0 -0
  143. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/__init__.py +0 -0
  144. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/base.py +0 -0
  145. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/capabilities.py +0 -0
  146. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/handlers/__init__.py +0 -0
  147. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/handlers/anthropic_handler.py +0 -0
  148. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/processors/pdf_processor.py +0 -0
  149. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/media/utils/__init__.py +0 -0
  150. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/__init__.py +0 -0
  151. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/basic_deepsearch.py +0 -0
  152. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/basic_extractor.py +0 -0
  153. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/basic_intent.py +0 -0
  154. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/basic_judge.py +0 -0
  155. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/processing/basic_summarizer.py +0 -0
  156. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/server/__init__.py +0 -0
  157. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/structured/__init__.py +0 -0
  158. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/structured/handler.py +0 -0
  159. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/structured/retry.py +0 -0
  160. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/__init__.py +0 -0
  161. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/tools/core.py +0 -0
  162. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/cli.py +0 -0
  163. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/message_preprocessor.py +0 -0
  164. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/structured_logging.py +0 -0
  165. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore/utils/token_utils.py +0 -0
  166. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/dependency_links.txt +0 -0
  167. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/entry_points.txt +0 -0
  168. {abstractcore-2.5.2 → abstractcore-2.6.0}/abstractcore.egg-info/top_level.txt +0 -0
  169. {abstractcore-2.5.2 → abstractcore-2.6.0}/setup.cfg +0 -0
  170. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_agentic_cli_compatibility.py +0 -0
  171. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_basic_session.py +0 -0
  172. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_complete_integration.py +0 -0
  173. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_comprehensive_events.py +0 -0
  174. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_core_components.py +0 -0
  175. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_enhanced_prompt.py +0 -0
  176. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_environment_variable_tool_call_tags.py +0 -0
  177. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_factory.py +0 -0
  178. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_final_accuracy.py +0 -0
  179. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_final_comprehensive.py +0 -0
  180. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_final_graceful_errors.py +0 -0
  181. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_graceful_fallback.py +0 -0
  182. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_import_debug.py +0 -0
  183. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_integrated_functionality.py +0 -0
  184. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_retry_observability.py +0 -0
  185. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_seed_determinism.py +0 -0
  186. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_seed_temperature_basic.py +0 -0
  187. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_sensory_prompting.py +0 -0
  188. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_text_only_model_experience.py +0 -0
  189. {abstractcore-2.5.2 → abstractcore-2.6.0}/tests/test_user_scenario_validation.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: abstractcore
3
- Version: 2.5.2
3
+ Version: 2.6.0
4
4
  Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
5
5
  Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
6
6
  Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -37,8 +37,10 @@ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "anthropic"
37
37
  Provides-Extra: ollama
38
38
  Provides-Extra: lmstudio
39
39
  Provides-Extra: huggingface
40
- Requires-Dist: transformers<5.0.0,>=4.30.0; extra == "huggingface"
41
- Requires-Dist: torch<3.0.0,>=1.12.0; extra == "huggingface"
40
+ Requires-Dist: transformers<5.0.0,>=4.57.1; extra == "huggingface"
41
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "huggingface"
42
+ Requires-Dist: torchvision>=0.17.0; extra == "huggingface"
43
+ Requires-Dist: torchaudio>=2.1.0; extra == "huggingface"
42
44
  Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "huggingface"
43
45
  Requires-Dist: outlines>=0.1.0; extra == "huggingface"
44
46
  Provides-Extra: mlx
@@ -46,7 +48,7 @@ Requires-Dist: mlx<1.0.0,>=0.15.0; extra == "mlx"
46
48
  Requires-Dist: mlx-lm<1.0.0,>=0.15.0; extra == "mlx"
47
49
  Requires-Dist: outlines>=0.1.0; extra == "mlx"
48
50
  Provides-Extra: embeddings
49
- Requires-Dist: sentence-transformers<4.0.0,>=2.7.0; extra == "embeddings"
51
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "embeddings"
50
52
  Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
51
53
  Provides-Extra: processing
52
54
  Provides-Extra: tools
@@ -59,18 +61,26 @@ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "media"
59
61
  Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "media"
60
62
  Requires-Dist: unstructured[office]<1.0.0,>=0.10.0; extra == "media"
61
63
  Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "media"
64
+ Provides-Extra: compression
65
+ Requires-Dist: pdf2image<2.0.0,>=1.16.0; extra == "compression"
62
66
  Provides-Extra: api-providers
63
67
  Requires-Dist: abstractcore[anthropic,openai]; extra == "api-providers"
64
68
  Provides-Extra: local-providers
65
69
  Requires-Dist: abstractcore[lmstudio,mlx,ollama]; extra == "local-providers"
70
+ Provides-Extra: local-providers-non-mlx
71
+ Requires-Dist: abstractcore[lmstudio,ollama]; extra == "local-providers-non-mlx"
66
72
  Provides-Extra: heavy-providers
67
73
  Requires-Dist: abstractcore[huggingface]; extra == "heavy-providers"
68
74
  Provides-Extra: all-providers
69
75
  Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers"
76
+ Provides-Extra: all-providers-non-mlx
77
+ Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,ollama,openai]; extra == "all-providers-non-mlx"
70
78
  Provides-Extra: all
71
- Requires-Dist: abstractcore[anthropic,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
79
+ Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
80
+ Provides-Extra: all-non-mlx
81
+ Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,ollama,openai,processing,server,test,tools]; extra == "all-non-mlx"
72
82
  Provides-Extra: lightweight
73
- Requires-Dist: abstractcore[anthropic,embeddings,lmstudio,media,ollama,openai,processing,server,tools]; extra == "lightweight"
83
+ Requires-Dist: abstractcore[anthropic,compression,embeddings,lmstudio,media,ollama,openai,processing,server,tools]; extra == "lightweight"
74
84
  Provides-Extra: dev
75
85
  Requires-Dist: pytest>=7.0.0; extra == "dev"
76
86
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
@@ -116,7 +126,11 @@ A unified Python library for interaction with multiple Large Language Model (LLM
116
126
  ### Installation
117
127
 
118
128
  ```bash
129
+ # macOS/Apple Silicon (includes MLX)
119
130
  pip install abstractcore[all]
131
+
132
+ # Linux/Windows (excludes MLX)
133
+ pip install abstractcore[all-non-mlx]
120
134
  ```
121
135
 
122
136
  ### Basic Usage
@@ -258,6 +272,118 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
258
272
 
259
273
  [Learn more about Session](docs/session.md)
260
274
 
275
+ ### Interaction Tracing (Observability)
276
+
277
+ Enable complete observability of LLM interactions for debugging, compliance, and transparency:
278
+
279
+ ```python
280
+ from abstractcore import create_llm
281
+ from abstractcore.core.session import BasicSession
282
+ from abstractcore.utils import export_traces
283
+
284
+ # Enable tracing on provider
285
+ llm = create_llm('openai', model='gpt-4o-mini', enable_tracing=True, max_traces=100)
286
+
287
+ # Or on session for automatic correlation
288
+ session = BasicSession(provider=llm, enable_tracing=True)
289
+
290
+ # Generate with custom metadata
291
+ response = session.generate(
292
+ "Write Python code",
293
+ step_type='code_generation',
294
+ attempt_number=1
295
+ )
296
+
297
+ # Access complete trace
298
+ trace_id = response.metadata['trace_id']
299
+ trace = llm.get_traces(trace_id=trace_id)
300
+
301
+ # Full interaction context
302
+ print(f"Prompt: {trace['prompt']}")
303
+ print(f"Response: {trace['response']['content']}")
304
+ print(f"Tokens: {trace['response']['usage']['total_tokens']}")
305
+ print(f"Time: {trace['response']['generation_time_ms']}ms")
306
+ print(f"Custom metadata: {trace['metadata']}")
307
+
308
+ # Get all session traces
309
+ traces = session.get_interaction_history()
310
+
311
+ # Export to JSONL, JSON, or Markdown
312
+ export_traces(traces, format='markdown', file_path='workflow_trace.md')
313
+ ```
314
+
315
+ **What's captured:**
316
+ - All prompts, system prompts, and conversation history
317
+ - Complete responses with token usage and timing
318
+ - Generation parameters (temperature, tokens, seed, etc.)
319
+ - Custom metadata for workflow tracking
320
+ - Tool calls and results
321
+
322
+ [Learn more about Interaction Tracing](docs/interaction-tracing.md)
323
+
324
+ ### Async/Await Support
325
+
326
+ Execute concurrent LLM requests for batch operations, multi-provider comparisons, or non-blocking web applications. **Production-ready with validated 6-7.5x performance improvement** for concurrent requests.
327
+
328
+ ```python
329
+ import asyncio
330
+ from abstractcore import create_llm
331
+
332
+ async def main():
333
+ llm = create_llm("openai", model="gpt-4o-mini")
334
+
335
+ # Execute 3 requests concurrently (6-7x faster!)
336
+ tasks = [
337
+ llm.agenerate(f"Summarize {topic}")
338
+ for topic in ["Python", "JavaScript", "Rust"]
339
+ ]
340
+ responses = await asyncio.gather(*tasks)
341
+
342
+ for response in responses:
343
+ print(response.content)
344
+
345
+ asyncio.run(main())
346
+ ```
347
+
348
+ **Performance (Validated with Real Testing):**
349
+ - **Ollama**: 7.5x faster for concurrent requests
350
+ - **LMStudio**: 6.5x faster for concurrent requests
351
+ - **OpenAI**: 6.0x faster for concurrent requests
352
+ - **Anthropic**: 7.4x faster for concurrent requests
353
+ - **Average**: ~7x speedup across all providers
354
+
355
+ **Native Async vs Fallback:**
356
+ - **Native async** (httpx.AsyncClient): Ollama, LMStudio, OpenAI, Anthropic
357
+ - **Fallback** (asyncio.to_thread): MLX, HuggingFace
358
+ - All providers work seamlessly - fallback keeps event loop responsive
359
+
360
+ **Use Cases:**
361
+ - Batch operations with 6-7x speedup via parallel execution
362
+ - Multi-provider comparisons (query OpenAI and Anthropic simultaneously)
363
+ - FastAPI/async web frameworks integration
364
+ - Session async for conversation management
365
+
366
+ **Works with:**
367
+ - All 6 providers (OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace)
368
+ - Streaming via `async for chunk in llm.agenerate(..., stream=True)`
369
+ - Sessions via `await session.agenerate(...)`
370
+ - Zero breaking changes to sync API
371
+
372
+ **Learn async patterns:**
373
+
374
+ AbstractCore includes an educational [async CLI demo](examples/async_cli_demo.py) that demonstrates 8 core async/await patterns:
375
+ - Event-driven progress with GlobalEventBus
376
+ - Parallel tool execution with asyncio.gather()
377
+ - Proper async streaming pattern (await first, then async for)
378
+ - Non-blocking animations and user input
379
+
380
+ ```bash
381
+ # Try the educational async demo
382
+ python examples/async_cli_demo.py --provider ollama --model qwen3:4b --stream
383
+ ```
384
+
385
+ [Learn more in CLI docs](docs/acore-cli.md#async-cli-demo-educational-reference)
386
+
261
387
  ### Media Handling
262
388
 
263
389
  AbstractCore provides unified media handling across all providers with automatic resolution optimization. Upload images, PDFs, and documents using the same simple API regardless of your provider.
@@ -307,9 +433,57 @@ response = llm.generate(
307
433
 
308
434
  [Learn more about Media Handling](docs/media-handling-system.md)
309
435
 
436
+ ### Glyph Visual-Text Compression (🧪 EXPERIMENTAL)
437
+
438
+ > ⚠️ **Vision Model Requirement**: This feature ONLY works with vision-capable models (e.g., gpt-4o, claude-3-5-sonnet, llama3.2-vision)
439
+
440
+ Achieve **3-4x token compression** and **faster inference** with Glyph's revolutionary visual-text compression:
441
+
442
+ ```python
443
+ from abstractcore import create_llm
444
+
445
+ # IMPORTANT: Requires a vision-capable model
446
+ llm = create_llm("ollama", model="llama3.2-vision:11b") # ✓ Vision model
447
+
448
+ # Large documents are automatically compressed for efficiency
449
+ response = llm.generate(
450
+ "Analyze the key findings in this research paper",
451
+ media=["large_research_paper.pdf"] # Automatically compressed if beneficial
452
+ )
453
+
454
+ # Force compression (raises error if model lacks vision)
455
+ response = llm.generate(
456
+ "Summarize this document",
457
+ media=["document.pdf"],
458
+ glyph_compression="always" # "auto" | "always" | "never"
459
+ )
460
+
461
+ # Non-vision models will raise UnsupportedFeatureError
462
+ # llm_no_vision = create_llm("openai", model="gpt-4") # ✗ No vision
463
+ # response = llm_no_vision.generate("...", glyph_compression="always") # Error!
464
+
465
+ # Check compression stats
466
+ if response.metadata and response.metadata.get('compression_used'):
467
+ stats = response.metadata.get('compression_stats', {})
468
+ print(f"Compression ratio: {stats.get('compression_ratio')}x")
469
+ print(f"Processing speedup: 14% faster, 79% less memory")
470
+ ```
471
+
472
+ **Validated Performance:**
473
+ - **14% faster processing** with real-world documents
474
+ - **79% lower memory usage** during processing
475
+ - **100% quality preservation** - no loss of analytical accuracy
476
+ - **Transparent operation** - works with existing code
477
+
478
+ [Learn more about Glyph Compression](docs/glyphs.md)
479
+
310
480
  ## Key Features
311
481
 
482
+ - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
312
483
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
484
+ - **Async/Await Support** ⭐ NEW in v2.6.0: Native async support for concurrent requests with `asyncio.gather()` - works with all 6 providers
485
+ - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, timing, and trace correlation for debugging, trust, and compliance
486
+ - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
313
487
  - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
314
488
  - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
315
489
  - **Vision Model Support**: Smart image processing at each model's maximum capability
@@ -422,7 +596,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
422
596
 
423
597
  ## Built-in Applications (Ready-to-Use CLI Tools)
424
598
 
425
- AbstractCore includes **four specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
599
+ AbstractCore includes **five specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
426
600
 
427
601
  ### Available Applications
428
602
 
@@ -432,6 +606,7 @@ AbstractCore includes **four specialized command-line applications** for common
432
606
  | **Extractor** | Entity and relationship extraction | `extractor` |
433
607
  | **Judge** | Text evaluation and scoring | `judge` |
434
608
  | **Intent Analyzer** | Psychological intent analysis & deception detection | `intent` |
609
+ | **DeepSearch** | Autonomous multi-stage research with web search | `deepsearch` |
435
610
 
436
611
  ### Quick Usage Examples
437
612
 
@@ -455,6 +630,11 @@ judge proposal.md --custom-criteria has_examples,covers_risks --output assessmen
455
630
  intent conversation.txt --focus-participant user --depth comprehensive
456
631
  intent email.txt --format plain --context document --verbose
457
632
  intent chat_log.json --conversation-mode --provider lmstudio --model qwen/qwen3-30b-a3b-2507
633
+
634
+ # Autonomous research with web search and reflexive refinement
635
+ deepsearch "What are the latest advances in quantum computing?" --depth comprehensive
636
+ deepsearch "AI impact on healthcare" --focus "diagnosis,treatment,ethics" --reflexive
637
+ deepsearch "sustainable energy 2025" --max-sources 25 --provider openai --model gpt-4o-mini
458
638
  ```
459
639
 
460
640
  ### Installation & Setup
@@ -467,9 +647,10 @@ pip install abstractcore[all]
467
647
 
468
648
  # Apps are immediately available
469
649
  summarizer --help
470
- extractor --help
650
+ extractor --help
471
651
  judge --help
472
652
  intent --help
653
+ deepsearch --help
473
654
  ```
474
655
 
475
656
  ### Alternative Usage Methods
@@ -480,12 +661,14 @@ summarizer document.txt
480
661
  extractor report.pdf
481
662
  judge essay.md
482
663
  intent conversation.txt
664
+ deepsearch "your research query"
483
665
 
484
666
  # Method 2: Via Python module
485
667
  python -m abstractcore.apps summarizer document.txt
486
668
  python -m abstractcore.apps extractor report.pdf
487
669
  python -m abstractcore.apps judge essay.md
488
670
  python -m abstractcore.apps intent conversation.txt
671
+ python -m abstractcore.apps deepsearch "your research query"
489
672
  ```
490
673
 
491
674
  ### Key Parameters
@@ -533,6 +716,7 @@ Each application has documentation with examples and usage information:
533
716
  - **[Extractor Guide](docs/apps/basic-extractor.md)** - Entity and relationship extraction
534
717
  - **[Intent Analyzer Guide](docs/apps/basic-intent.md)** - Psychological intent analysis and deception detection
535
718
  - **[Judge Guide](docs/apps/basic-judge.md)** - Text evaluation and scoring systems
719
+ - **[DeepSearch Guide](docs/apps/basic-deepsearch.md)** - Autonomous multi-stage research with web search
536
720
 
537
721
  **When to use the apps:**
538
722
  - Processing documents without writing code
@@ -752,6 +936,7 @@ curl -X POST http://localhost:8000/v1/chat/completions \
752
936
 
753
937
  ## Why AbstractCore?
754
938
 
939
+ - **Offline-First Philosophy**: Designed for open source LLMs with complete offline operation. No internet required after initial model download
755
940
  - **Unified Interface**: One API for all LLM providers
756
941
  - **Multimodal Support**: Upload images, PDFs, and documents across all providers
757
942
  - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
@@ -777,6 +962,9 @@ pip install abstractcore[media]
777
962
  pip install abstractcore[openai]
778
963
  pip install abstractcore[anthropic]
779
964
  pip install abstractcore[ollama]
965
+ pip install abstractcore[lmstudio]
966
+ pip install abstractcore[huggingface]
967
+ pip install abstractcore[mlx] # macOS/Apple Silicon only
780
968
 
781
969
  # With server support
782
970
  pip install abstractcore[server]
@@ -786,6 +974,16 @@ pip install abstractcore[embeddings]
786
974
 
787
975
  # Everything (recommended)
788
976
  pip install abstractcore[all]
977
+
978
+ # Cross-platform (all except MLX - for Linux/Windows)
979
+ pip install abstractcore[all-non-mlx]
980
+
981
+ # Provider groups
982
+ pip install abstractcore[all-providers] # All providers (includes MLX)
983
+ pip install abstractcore[all-providers-non-mlx] # All providers except MLX
984
+ pip install abstractcore[local-providers] # Ollama, LMStudio, MLX
985
+ pip install abstractcore[local-providers-non-mlx] # Ollama, LMStudio only
986
+ pip install abstractcore[api-providers] # OpenAI, Anthropic
789
987
  ```
790
988
 
791
989
  **Media processing extras:**
@@ -816,6 +1014,7 @@ All tests passing as of October 12th, 2025.
816
1014
  ## Quick Links
817
1015
 
818
1016
  - **[📚 Documentation Index](docs/)** - Complete documentation navigation guide
1017
+ - **[🔍 Interaction Tracing](docs/interaction-tracing.md)** - LLM observability and debugging ⭐ NEW
819
1018
  - **[Getting Started](docs/getting-started.md)** - 5-minute quick start
820
1019
  - **[⚙️ Prerequisites](docs/prerequisites.md)** - Provider setup (OpenAI, Anthropic, Ollama, etc.)
821
1020
  - **[📖 Python API](docs/api-reference.md)** - Complete Python API reference
@@ -14,7 +14,11 @@ A unified Python library for interaction with multiple Large Language Model (LLM
14
14
  ### Installation
15
15
 
16
16
  ```bash
17
+ # macOS/Apple Silicon (includes MLX)
17
18
  pip install abstractcore[all]
19
+
20
+ # Linux/Windows (excludes MLX)
21
+ pip install abstractcore[all-non-mlx]
18
22
  ```
19
23
 
20
24
  ### Basic Usage
@@ -156,6 +160,118 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
156
160
 
157
161
  [Learn more about Session](docs/session.md)
158
162
 
163
+ ### Interaction Tracing (Observability)
164
+
165
+ Enable complete observability of LLM interactions for debugging, compliance, and transparency:
166
+
167
+ ```python
168
+ from abstractcore import create_llm
169
+ from abstractcore.core.session import BasicSession
170
+ from abstractcore.utils import export_traces
171
+
172
+ # Enable tracing on provider
173
+ llm = create_llm('openai', model='gpt-4o-mini', enable_tracing=True, max_traces=100)
174
+
175
+ # Or on session for automatic correlation
176
+ session = BasicSession(provider=llm, enable_tracing=True)
177
+
178
+ # Generate with custom metadata
179
+ response = session.generate(
180
+ "Write Python code",
181
+ step_type='code_generation',
182
+ attempt_number=1
183
+ )
184
+
185
+ # Access complete trace
186
+ trace_id = response.metadata['trace_id']
187
+ trace = llm.get_traces(trace_id=trace_id)
188
+
189
+ # Full interaction context
190
+ print(f"Prompt: {trace['prompt']}")
191
+ print(f"Response: {trace['response']['content']}")
192
+ print(f"Tokens: {trace['response']['usage']['total_tokens']}")
193
+ print(f"Time: {trace['response']['generation_time_ms']}ms")
194
+ print(f"Custom metadata: {trace['metadata']}")
195
+
196
+ # Get all session traces
197
+ traces = session.get_interaction_history()
198
+
199
+ # Export to JSONL, JSON, or Markdown
200
+ export_traces(traces, format='markdown', file_path='workflow_trace.md')
201
+ ```
202
+
203
+ **What's captured:**
204
+ - All prompts, system prompts, and conversation history
205
+ - Complete responses with token usage and timing
206
+ - Generation parameters (temperature, tokens, seed, etc.)
207
+ - Custom metadata for workflow tracking
208
+ - Tool calls and results
209
+
210
+ [Learn more about Interaction Tracing](docs/interaction-tracing.md)
211
+
212
+ ### Async/Await Support
213
+
214
+ Execute concurrent LLM requests for batch operations, multi-provider comparisons, or non-blocking web applications. **Production-ready with validated 6-7.5x performance improvement** for concurrent requests.
215
+
216
+ ```python
217
+ import asyncio
218
+ from abstractcore import create_llm
219
+
220
+ async def main():
221
+ llm = create_llm("openai", model="gpt-4o-mini")
222
+
223
+ # Execute 3 requests concurrently (6-7x faster!)
224
+ tasks = [
225
+ llm.agenerate(f"Summarize {topic}")
226
+ for topic in ["Python", "JavaScript", "Rust"]
227
+ ]
228
+ responses = await asyncio.gather(*tasks)
229
+
230
+ for response in responses:
231
+ print(response.content)
232
+
233
+ asyncio.run(main())
234
+ ```
235
+
236
+ **Performance (Validated with Real Testing):**
237
+ - **Ollama**: 7.5x faster for concurrent requests
238
+ - **LMStudio**: 6.5x faster for concurrent requests
239
+ - **OpenAI**: 6.0x faster for concurrent requests
240
+ - **Anthropic**: 7.4x faster for concurrent requests
241
+ - **Average**: ~7x speedup across all providers
242
+
243
+ **Native Async vs Fallback:**
244
+ - **Native async** (httpx.AsyncClient): Ollama, LMStudio, OpenAI, Anthropic
245
+ - **Fallback** (asyncio.to_thread): MLX, HuggingFace
246
+ - All providers work seamlessly - fallback keeps event loop responsive
247
+
248
+ **Use Cases:**
249
+ - Batch operations with 6-7x speedup via parallel execution
250
+ - Multi-provider comparisons (query OpenAI and Anthropic simultaneously)
251
+ - FastAPI/async web frameworks integration
252
+ - Session async for conversation management
253
+
254
+ **Works with:**
255
+ - All 6 providers (OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace)
256
+ - Streaming via `async for chunk in llm.agenerate(..., stream=True)`
257
+ - Sessions via `await session.agenerate(...)`
258
+ - Zero breaking changes to sync API
259
+
260
+ **Learn async patterns:**
261
+
262
+ AbstractCore includes an educational [async CLI demo](examples/async_cli_demo.py) that demonstrates 8 core async/await patterns:
263
+ - Event-driven progress with GlobalEventBus
264
+ - Parallel tool execution with asyncio.gather()
265
+ - Proper async streaming pattern (await first, then async for)
266
+ - Non-blocking animations and user input
267
+
268
+ ```bash
269
+ # Try the educational async demo
270
+ python examples/async_cli_demo.py --provider ollama --model qwen3:4b --stream
271
+ ```
272
+
273
+ [Learn more in CLI docs](docs/acore-cli.md#async-cli-demo-educational-reference)
274
+
159
275
  ### Media Handling
160
276
 
161
277
  AbstractCore provides unified media handling across all providers with automatic resolution optimization. Upload images, PDFs, and documents using the same simple API regardless of your provider.
@@ -205,9 +321,57 @@ response = llm.generate(
205
321
 
206
322
  [Learn more about Media Handling](docs/media-handling-system.md)
207
323
 
324
+ ### Glyph Visual-Text Compression (🧪 EXPERIMENTAL)
325
+
326
+ > ⚠️ **Vision Model Requirement**: This feature ONLY works with vision-capable models (e.g., gpt-4o, claude-3-5-sonnet, llama3.2-vision)
327
+
328
+ Achieve **3-4x token compression** and **faster inference** with Glyph's revolutionary visual-text compression:
329
+
330
+ ```python
331
+ from abstractcore import create_llm
332
+
333
+ # IMPORTANT: Requires a vision-capable model
334
+ llm = create_llm("ollama", model="llama3.2-vision:11b") # ✓ Vision model
335
+
336
+ # Large documents are automatically compressed for efficiency
337
+ response = llm.generate(
338
+ "Analyze the key findings in this research paper",
339
+ media=["large_research_paper.pdf"] # Automatically compressed if beneficial
340
+ )
341
+
342
+ # Force compression (raises error if model lacks vision)
343
+ response = llm.generate(
344
+ "Summarize this document",
345
+ media=["document.pdf"],
346
+ glyph_compression="always" # "auto" | "always" | "never"
347
+ )
348
+
349
+ # Non-vision models will raise UnsupportedFeatureError
350
+ # llm_no_vision = create_llm("openai", model="gpt-4") # ✗ No vision
351
+ # response = llm_no_vision.generate("...", glyph_compression="always") # Error!
352
+
353
+ # Check compression stats
354
+ if response.metadata and response.metadata.get('compression_used'):
355
+ stats = response.metadata.get('compression_stats', {})
356
+ print(f"Compression ratio: {stats.get('compression_ratio')}x")
357
+ print(f"Processing speedup: 14% faster, 79% less memory")
358
+ ```
359
+
360
+ **Validated Performance:**
361
+ - **14% faster processing** with real-world documents
362
+ - **79% lower memory usage** during processing
363
+ - **100% quality preservation** - no loss of analytical accuracy
364
+ - **Transparent operation** - works with existing code
365
+
366
+ [Learn more about Glyph Compression](docs/glyphs.md)
367
+
208
368
  ## Key Features
209
369
 
370
+ - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
210
371
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
372
+ - **Async/Await Support** ⭐ NEW in v2.6.0: Concurrent requests with `asyncio.gather()` - native async for OpenAI, Anthropic, Ollama, and LMStudio, with a thread-based fallback for MLX and HuggingFace, so all 6 providers work
373
+ - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, timing, and trace correlation for debugging, trust, and compliance
374
+ - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
211
375
  - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
212
376
  - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
213
377
  - **Vision Model Support**: Smart image processing at each model's maximum capability
@@ -320,7 +484,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
320
484
 
321
485
  ## Built-in Applications (Ready-to-Use CLI Tools)
322
486
 
323
- AbstractCore includes **four specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
487
+ AbstractCore includes **five specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
324
488
 
325
489
  ### Available Applications
326
490
 
@@ -330,6 +494,7 @@ AbstractCore includes **four specialized command-line applications** for common
330
494
  | **Extractor** | Entity and relationship extraction | `extractor` |
331
495
  | **Judge** | Text evaluation and scoring | `judge` |
332
496
  | **Intent Analyzer** | Psychological intent analysis & deception detection | `intent` |
497
+ | **DeepSearch** | Autonomous multi-stage research with web search | `deepsearch` |
333
498
 
334
499
  ### Quick Usage Examples
335
500
 
@@ -353,6 +518,11 @@ judge proposal.md --custom-criteria has_examples,covers_risks --output assessmen
353
518
  intent conversation.txt --focus-participant user --depth comprehensive
354
519
  intent email.txt --format plain --context document --verbose
355
520
  intent chat_log.json --conversation-mode --provider lmstudio --model qwen/qwen3-30b-a3b-2507
521
+
522
+ # Autonomous research with web search and reflexive refinement
523
+ deepsearch "What are the latest advances in quantum computing?" --depth comprehensive
524
+ deepsearch "AI impact on healthcare" --focus "diagnosis,treatment,ethics" --reflexive
525
+ deepsearch "sustainable energy 2025" --max-sources 25 --provider openai --model gpt-4o-mini
356
526
  ```
357
527
 
358
528
  ### Installation & Setup
@@ -365,9 +535,10 @@ pip install abstractcore[all]
365
535
 
366
536
  # Apps are immediately available
367
537
  summarizer --help
368
- extractor --help
538
+ extractor --help
369
539
  judge --help
370
540
  intent --help
541
+ deepsearch --help
371
542
  ```
372
543
 
373
544
  ### Alternative Usage Methods
@@ -378,12 +549,14 @@ summarizer document.txt
378
549
  extractor report.pdf
379
550
  judge essay.md
380
551
  intent conversation.txt
552
+ deepsearch "your research query"
381
553
 
382
554
  # Method 2: Via Python module
383
555
  python -m abstractcore.apps summarizer document.txt
384
556
  python -m abstractcore.apps extractor report.pdf
385
557
  python -m abstractcore.apps judge essay.md
386
558
  python -m abstractcore.apps intent conversation.txt
559
+ python -m abstractcore.apps deepsearch "your research query"
387
560
  ```
388
561
 
389
562
  ### Key Parameters
@@ -431,6 +604,7 @@ Each application has documentation with examples and usage information:
431
604
  - **[Extractor Guide](docs/apps/basic-extractor.md)** - Entity and relationship extraction
432
605
  - **[Intent Analyzer Guide](docs/apps/basic-intent.md)** - Psychological intent analysis and deception detection
433
606
  - **[Judge Guide](docs/apps/basic-judge.md)** - Text evaluation and scoring systems
607
+ - **[DeepSearch Guide](docs/apps/basic-deepsearch.md)** - Autonomous multi-stage research with web search
434
608
 
435
609
  **When to use the apps:**
436
610
  - Processing documents without writing code
@@ -650,6 +824,7 @@ curl -X POST http://localhost:8000/v1/chat/completions \
650
824
 
651
825
  ## Why AbstractCore?
652
826
 
827
+ - **Offline-First Philosophy**: Designed for open source LLMs with complete offline operation. No internet required after initial model download
653
828
  - **Unified Interface**: One API for all LLM providers
654
829
  - **Multimodal Support**: Upload images, PDFs, and documents across all providers
655
830
  - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
@@ -675,6 +850,9 @@ pip install abstractcore[media]
675
850
  pip install abstractcore[openai]
676
851
  pip install abstractcore[anthropic]
677
852
  pip install abstractcore[ollama]
853
+ pip install abstractcore[lmstudio]
854
+ pip install abstractcore[huggingface]
855
+ pip install abstractcore[mlx] # macOS/Apple Silicon only
678
856
 
679
857
  # With server support
680
858
  pip install abstractcore[server]
@@ -684,6 +862,16 @@ pip install abstractcore[embeddings]
684
862
 
685
863
  # Everything (recommended)
686
864
  pip install abstractcore[all]
865
+
866
+ # Cross-platform (all except MLX - for Linux/Windows)
867
+ pip install abstractcore[all-non-mlx]
868
+
869
+ # Provider groups
870
+ pip install abstractcore[all-providers] # All providers (includes MLX)
871
+ pip install abstractcore[all-providers-non-mlx] # All providers except MLX
872
+ pip install abstractcore[local-providers] # Ollama, LMStudio, MLX
873
+ pip install abstractcore[local-providers-non-mlx] # Ollama, LMStudio only
874
+ pip install abstractcore[api-providers] # OpenAI, Anthropic
687
875
  ```
688
876
 
689
877
  **Media processing extras:**
@@ -714,6 +902,7 @@ All tests passing as of October 12th, 2025.
714
902
  ## Quick Links
715
903
 
716
904
  - **[📚 Documentation Index](docs/)** - Complete documentation navigation guide
905
+ - **[🔍 Interaction Tracing](docs/interaction-tracing.md)** - LLM observability and debugging ⭐ NEW
717
906
  - **[Getting Started](docs/getting-started.md)** - 5-minute quick start
718
907
  - **[⚙️ Prerequisites](docs/prerequisites.md)** - Provider setup (OpenAI, Anthropic, Ollama, etc.)
719
908
  - **[📖 Python API](docs/api-reference.md)** - Complete Python API reference
@@ -2,6 +2,8 @@
2
2
  """
3
3
  AbstractCore - Unified interface to all LLM providers with essential infrastructure.
4
4
 
5
+ CRITICAL: Offline-first design - enforces offline mode for open source LLMs by default.
6
+
5
7
  Key Features:
6
8
  • Multi-provider support (OpenAI, Anthropic, Ollama, HuggingFace, MLX, LMStudio)
7
9
  • Unified token parameter vocabulary across all providers
@@ -47,6 +49,16 @@ _has_processing = True
47
49
  # Tools module (core functionality)
48
50
  from .tools import tool
49
51
 
52
+ # Download module (core functionality)
53
+ from .download import download_model, DownloadProgress, DownloadStatus
54
+
55
+ # Compression module (optional import)
56
+ try:
57
+ from .compression import GlyphConfig, CompressionOrchestrator
58
+ _has_compression = True
59
+ except ImportError:
60
+ _has_compression = False
61
+
50
62
  __all__ = [
51
63
  'create_llm',
52
64
  'BasicSession',
@@ -58,11 +70,17 @@ __all__ = [
58
70
  'ModelNotFoundError',
59
71
  'ProviderAPIError',
60
72
  'AuthenticationError',
61
- 'tool'
73
+ 'tool',
74
+ 'download_model',
75
+ 'DownloadProgress',
76
+ 'DownloadStatus',
62
77
  ]
63
78
 
64
79
  if _has_embeddings:
65
80
  __all__.append('EmbeddingManager')
66
81
 
82
+ if _has_compression:
83
+ __all__.extend(['GlyphConfig', 'CompressionOrchestrator'])
84
+
67
85
  # Processing is core functionality
68
86
  __all__.extend(['BasicSummarizer', 'SummaryStyle', 'SummaryLength', 'BasicExtractor'])