abstractcore 2.5.0__tar.gz → 2.5.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188) hide show
  1. {abstractcore-2.5.0 → abstractcore-2.5.3}/PKG-INFO +140 -23
  2. {abstractcore-2.5.0 → abstractcore-2.5.3}/README.md +128 -17
  3. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/__init__.py +12 -0
  4. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/__main__.py +8 -1
  5. abstractcore-2.5.3/abstractcore/apps/deepsearch.py +644 -0
  6. abstractcore-2.5.3/abstractcore/apps/intent.py +614 -0
  7. abstractcore-2.5.3/abstractcore/architectures/detection.py +542 -0
  8. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/assets/architecture_formats.json +14 -1
  9. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/assets/model_capabilities.json +583 -44
  10. abstractcore-2.5.3/abstractcore/compression/__init__.py +29 -0
  11. abstractcore-2.5.3/abstractcore/compression/analytics.py +420 -0
  12. abstractcore-2.5.3/abstractcore/compression/cache.py +250 -0
  13. abstractcore-2.5.3/abstractcore/compression/config.py +279 -0
  14. abstractcore-2.5.3/abstractcore/compression/exceptions.py +30 -0
  15. abstractcore-2.5.3/abstractcore/compression/glyph_processor.py +381 -0
  16. abstractcore-2.5.3/abstractcore/compression/optimizer.py +388 -0
  17. abstractcore-2.5.3/abstractcore/compression/orchestrator.py +380 -0
  18. abstractcore-2.5.3/abstractcore/compression/pil_text_renderer.py +818 -0
  19. abstractcore-2.5.3/abstractcore/compression/quality.py +226 -0
  20. abstractcore-2.5.3/abstractcore/compression/text_formatter.py +666 -0
  21. abstractcore-2.5.3/abstractcore/compression/vision_compressor.py +371 -0
  22. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/config/main.py +66 -1
  23. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/config/manager.py +111 -5
  24. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/session.py +105 -5
  25. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/events/__init__.py +1 -1
  26. abstractcore-2.5.3/abstractcore/media/auto_handler.py +657 -0
  27. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/handlers/local_handler.py +14 -2
  28. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/handlers/openai_handler.py +62 -3
  29. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/processors/__init__.py +11 -1
  30. abstractcore-2.5.3/abstractcore/media/processors/direct_pdf_processor.py +210 -0
  31. abstractcore-2.5.3/abstractcore/media/processors/glyph_pdf_processor.py +227 -0
  32. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/processors/image_processor.py +7 -1
  33. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/processors/text_processor.py +18 -3
  34. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/types.py +164 -7
  35. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/processing/__init__.py +5 -1
  36. abstractcore-2.5.3/abstractcore/processing/basic_deepsearch.py +2173 -0
  37. abstractcore-2.5.3/abstractcore/processing/basic_intent.py +690 -0
  38. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/__init__.py +18 -0
  39. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/anthropic_provider.py +29 -2
  40. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/base.py +279 -6
  41. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/huggingface_provider.py +658 -27
  42. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/lmstudio_provider.py +52 -2
  43. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/mlx_provider.py +103 -4
  44. abstractcore-2.5.3/abstractcore/providers/model_capabilities.py +352 -0
  45. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/ollama_provider.py +44 -6
  46. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/openai_provider.py +29 -2
  47. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/registry.py +91 -19
  48. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/server/app.py +91 -81
  49. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/structured/handler.py +161 -1
  50. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/common_tools.py +98 -3
  51. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/__init__.py +4 -1
  52. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/cli.py +114 -1
  53. abstractcore-2.5.3/abstractcore/utils/trace_export.py +287 -0
  54. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/version.py +1 -1
  55. abstractcore-2.5.3/abstractcore/utils/vlm_token_calculator.py +655 -0
  56. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/PKG-INFO +140 -23
  57. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/SOURCES.txt +22 -53
  58. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/entry_points.txt +4 -0
  59. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/requires.txt +12 -5
  60. {abstractcore-2.5.0 → abstractcore-2.5.3}/pyproject.toml +19 -5
  61. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_retry_strategy.py +24 -9
  62. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_unload_memory.py +4 -4
  63. abstractcore-2.5.0/abstractcore/architectures/detection.py +0 -296
  64. abstractcore-2.5.0/abstractcore/media/auto_handler.py +0 -363
  65. abstractcore-2.5.0/tests/test_all_specified_providers.py +0 -182
  66. abstractcore-2.5.0/tests/test_basic_summarizer.py +0 -332
  67. abstractcore-2.5.0/tests/test_cli_media.py +0 -45
  68. abstractcore-2.5.0/tests/test_consistency.py +0 -116
  69. abstractcore-2.5.0/tests/test_critical_streaming_tool_fix.py +0 -744
  70. abstractcore-2.5.0/tests/test_debug_server.py +0 -69
  71. abstractcore-2.5.0/tests/test_direct_vs_server.py +0 -157
  72. abstractcore-2.5.0/tests/test_embeddings.py +0 -391
  73. abstractcore-2.5.0/tests/test_embeddings_integration.py +0 -317
  74. abstractcore-2.5.0/tests/test_embeddings_llm_integration.py +0 -331
  75. abstractcore-2.5.0/tests/test_embeddings_matrix_operations.py +0 -387
  76. abstractcore-2.5.0/tests/test_embeddings_no_mock.py +0 -358
  77. abstractcore-2.5.0/tests/test_embeddings_real.py +0 -428
  78. abstractcore-2.5.0/tests/test_embeddings_semantic_validation.py +0 -438
  79. abstractcore-2.5.0/tests/test_embeddings_simple.py +0 -124
  80. abstractcore-2.5.0/tests/test_fixed_media.py +0 -168
  81. abstractcore-2.5.0/tests/test_fixed_prompt.py +0 -65
  82. abstractcore-2.5.0/tests/test_lmstudio_context.py +0 -143
  83. abstractcore-2.5.0/tests/test_media_import.py +0 -65
  84. abstractcore-2.5.0/tests/test_media_server.py +0 -276
  85. abstractcore-2.5.0/tests/test_ollama_tool_role_fix.py +0 -269
  86. abstractcore-2.5.0/tests/test_openai_conversion_manual.py +0 -119
  87. abstractcore-2.5.0/tests/test_openai_format_bug.py +0 -257
  88. abstractcore-2.5.0/tests/test_openai_format_conversion.py +0 -485
  89. abstractcore-2.5.0/tests/test_openai_media_integration.py +0 -444
  90. abstractcore-2.5.0/tests/test_progressive_complexity.py +0 -163
  91. abstractcore-2.5.0/tests/test_provider_basic_session.py +0 -154
  92. abstractcore-2.5.0/tests/test_provider_connectivity.py +0 -102
  93. abstractcore-2.5.0/tests/test_provider_simple_generation.py +0 -167
  94. abstractcore-2.5.0/tests/test_provider_streaming.py +0 -364
  95. abstractcore-2.5.0/tests/test_provider_token_translation.py +0 -289
  96. abstractcore-2.5.0/tests/test_provider_tool_detection.py +0 -265
  97. abstractcore-2.5.0/tests/test_providers.py +0 -362
  98. abstractcore-2.5.0/tests/test_providers_comprehensive.py +0 -353
  99. abstractcore-2.5.0/tests/test_providers_simple.py +0 -168
  100. abstractcore-2.5.0/tests/test_real_models_comprehensive.py +0 -521
  101. abstractcore-2.5.0/tests/test_server_debug.py +0 -131
  102. abstractcore-2.5.0/tests/test_server_embeddings_real.py +0 -233
  103. abstractcore-2.5.0/tests/test_server_integration.py +0 -245
  104. abstractcore-2.5.0/tests/test_stream_tool_calling.py +0 -496
  105. abstractcore-2.5.0/tests/test_streaming_enhancements.py +0 -614
  106. abstractcore-2.5.0/tests/test_streaming_tag_rewriting.py +0 -527
  107. abstractcore-2.5.0/tests/test_structured_integration.py +0 -218
  108. abstractcore-2.5.0/tests/test_structured_output.py +0 -332
  109. abstractcore-2.5.0/tests/test_syntax_rewriter.py +0 -471
  110. abstractcore-2.5.0/tests/test_tool_calling.py +0 -231
  111. abstractcore-2.5.0/tests/test_tool_execution_separation.py +0 -857
  112. abstractcore-2.5.0/tests/test_unified_streaming.py +0 -852
  113. abstractcore-2.5.0/tests/test_vision_accuracy.py +0 -145
  114. abstractcore-2.5.0/tests/test_vision_comprehensive.py +0 -648
  115. abstractcore-2.5.0/tests/test_vision_fallback_improvement.py +0 -129
  116. abstractcore-2.5.0/tests/test_wrong_model_fallback.py +0 -262
  117. {abstractcore-2.5.0 → abstractcore-2.5.3}/LICENSE +0 -0
  118. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/__init__.py +0 -0
  119. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/app_config_utils.py +0 -0
  120. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/extractor.py +0 -0
  121. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/judge.py +0 -0
  122. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/apps/summarizer.py +0 -0
  123. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/architectures/__init__.py +0 -0
  124. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/architectures/enums.py +0 -0
  125. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/assets/session_schema.json +0 -0
  126. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/config/__init__.py +0 -0
  127. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/config/vision_config.py +0 -0
  128. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/__init__.py +0 -0
  129. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/enums.py +0 -0
  130. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/factory.py +0 -0
  131. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/interface.py +0 -0
  132. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/retry.py +0 -0
  133. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/core/types.py +0 -0
  134. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/embeddings/__init__.py +0 -0
  135. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/embeddings/manager.py +0 -0
  136. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/embeddings/models.py +0 -0
  137. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/exceptions/__init__.py +0 -0
  138. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/__init__.py +0 -0
  139. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/base.py +0 -0
  140. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/capabilities.py +0 -0
  141. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/handlers/__init__.py +0 -0
  142. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/handlers/anthropic_handler.py +0 -0
  143. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/processors/office_processor.py +0 -0
  144. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/processors/pdf_processor.py +0 -0
  145. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/utils/__init__.py +0 -0
  146. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/utils/image_scaler.py +0 -0
  147. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/media/vision_fallback.py +0 -0
  148. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/processing/basic_extractor.py +0 -0
  149. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/processing/basic_judge.py +0 -0
  150. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/processing/basic_summarizer.py +0 -0
  151. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/providers/streaming.py +0 -0
  152. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/server/__init__.py +0 -0
  153. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/structured/__init__.py +0 -0
  154. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/structured/retry.py +0 -0
  155. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/__init__.py +0 -0
  156. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/core.py +0 -0
  157. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/handler.py +0 -0
  158. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/parser.py +0 -0
  159. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/registry.py +0 -0
  160. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/syntax_rewriter.py +0 -0
  161. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/tools/tag_rewriter.py +0 -0
  162. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/message_preprocessor.py +0 -0
  163. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/self_fixes.py +0 -0
  164. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/structured_logging.py +0 -0
  165. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore/utils/token_utils.py +0 -0
  166. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/dependency_links.txt +0 -0
  167. {abstractcore-2.5.0 → abstractcore-2.5.3}/abstractcore.egg-info/top_level.txt +0 -0
  168. {abstractcore-2.5.0 → abstractcore-2.5.3}/setup.cfg +0 -0
  169. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_agentic_cli_compatibility.py +0 -0
  170. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_basic_session.py +0 -0
  171. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_complete_integration.py +0 -0
  172. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_comprehensive_events.py +0 -0
  173. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_core_components.py +0 -0
  174. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_enhanced_prompt.py +0 -0
  175. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_environment_variable_tool_call_tags.py +0 -0
  176. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_factory.py +0 -0
  177. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_final_accuracy.py +0 -0
  178. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_final_comprehensive.py +0 -0
  179. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_final_graceful_errors.py +0 -0
  180. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_graceful_fallback.py +0 -0
  181. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_import_debug.py +0 -0
  182. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_integrated_functionality.py +0 -0
  183. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_retry_observability.py +0 -0
  184. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_seed_determinism.py +0 -0
  185. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_seed_temperature_basic.py +0 -0
  186. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_sensory_prompting.py +0 -0
  187. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_text_only_model_experience.py +0 -0
  188. {abstractcore-2.5.0 → abstractcore-2.5.3}/tests/test_user_scenario_validation.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: abstractcore
3
- Version: 2.5.0
3
+ Version: 2.5.3
4
4
  Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
5
5
  Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
6
6
  Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -37,14 +37,18 @@ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "anthropic"
37
37
  Provides-Extra: ollama
38
38
  Provides-Extra: lmstudio
39
39
  Provides-Extra: huggingface
40
- Requires-Dist: transformers<5.0.0,>=4.30.0; extra == "huggingface"
41
- Requires-Dist: torch<3.0.0,>=1.12.0; extra == "huggingface"
40
+ Requires-Dist: transformers<5.0.0,>=4.57.1; extra == "huggingface"
41
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "huggingface"
42
+ Requires-Dist: torchvision>=0.17.0; extra == "huggingface"
43
+ Requires-Dist: torchaudio>=2.1.0; extra == "huggingface"
42
44
  Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "huggingface"
45
+ Requires-Dist: outlines>=0.1.0; extra == "huggingface"
43
46
  Provides-Extra: mlx
44
47
  Requires-Dist: mlx<1.0.0,>=0.15.0; extra == "mlx"
45
48
  Requires-Dist: mlx-lm<1.0.0,>=0.15.0; extra == "mlx"
49
+ Requires-Dist: outlines>=0.1.0; extra == "mlx"
46
50
  Provides-Extra: embeddings
47
- Requires-Dist: sentence-transformers<4.0.0,>=2.7.0; extra == "embeddings"
51
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "embeddings"
48
52
  Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
49
53
  Provides-Extra: processing
50
54
  Provides-Extra: tools
@@ -57,6 +61,8 @@ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "media"
57
61
  Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "media"
58
62
  Requires-Dist: unstructured[office]<1.0.0,>=0.10.0; extra == "media"
59
63
  Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "media"
64
+ Provides-Extra: compression
65
+ Requires-Dist: pdf2image<2.0.0,>=1.16.0; extra == "compression"
60
66
  Provides-Extra: api-providers
61
67
  Requires-Dist: abstractcore[anthropic,openai]; extra == "api-providers"
62
68
  Provides-Extra: local-providers
@@ -66,9 +72,9 @@ Requires-Dist: abstractcore[huggingface]; extra == "heavy-providers"
66
72
  Provides-Extra: all-providers
67
73
  Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers"
68
74
  Provides-Extra: all
69
- Requires-Dist: abstractcore[anthropic,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
75
+ Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
70
76
  Provides-Extra: lightweight
71
- Requires-Dist: abstractcore[anthropic,embeddings,lmstudio,media,ollama,openai,processing,server,tools]; extra == "lightweight"
77
+ Requires-Dist: abstractcore[anthropic,compression,embeddings,lmstudio,media,ollama,openai,processing,server,tools]; extra == "lightweight"
72
78
  Provides-Extra: dev
73
79
  Requires-Dist: pytest>=7.0.0; extra == "dev"
74
80
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
@@ -256,9 +262,58 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
256
262
 
257
263
  [Learn more about Session](docs/session.md)
258
264
 
265
+ ### Interaction Tracing (Observability)
266
+
267
+ Enable complete observability of LLM interactions for debugging, compliance, and transparency:
268
+
269
+ ```python
270
+ from abstractcore import create_llm
271
+ from abstractcore.core.session import BasicSession
272
+ from abstractcore.utils import export_traces
273
+
274
+ # Enable tracing on provider
275
+ llm = create_llm('openai', model='gpt-4o-mini', enable_tracing=True, max_traces=100)
276
+
277
+ # Or on session for automatic correlation
278
+ session = BasicSession(provider=llm, enable_tracing=True)
279
+
280
+ # Generate with custom metadata
281
+ response = session.generate(
282
+ "Write Python code",
283
+ step_type='code_generation',
284
+ attempt_number=1
285
+ )
286
+
287
+ # Access complete trace
288
+ trace_id = response.metadata['trace_id']
289
+ trace = llm.get_traces(trace_id=trace_id)
290
+
291
+ # Full interaction context
292
+ print(f"Prompt: {trace['prompt']}")
293
+ print(f"Response: {trace['response']['content']}")
294
+ print(f"Tokens: {trace['response']['usage']['total_tokens']}")
295
+ print(f"Time: {trace['response']['generation_time_ms']}ms")
296
+ print(f"Custom metadata: {trace['metadata']}")
297
+
298
+ # Get all session traces
299
+ traces = session.get_interaction_history()
300
+
301
+ # Export to JSONL, JSON, or Markdown
302
+ export_traces(traces, format='markdown', file_path='workflow_trace.md')
303
+ ```
304
+
305
+ **What's captured:**
306
+ - All prompts, system prompts, and conversation history
307
+ - Complete responses with token usage and timing
308
+ - Generation parameters (temperature, tokens, seed, etc.)
309
+ - Custom metadata for workflow tracking
310
+ - Tool calls and results
311
+
312
+ [Learn more about Interaction Tracing](docs/interaction-tracing.md)
313
+
259
314
  ### Media Handling
260
315
 
261
- AbstractCore provides **unified media handling** across all providers with **automatic maximum resolution optimization** for best results. Upload images, PDFs, and documents using the same simple API regardless of your provider.
316
+ AbstractCore provides unified media handling across all providers with automatic resolution optimization. Upload images, PDFs, and documents using the same simple API regardless of your provider.
262
317
 
263
318
  ```python
264
319
  from abstractcore import create_llm
@@ -296,7 +351,7 @@ response = llm.generate(
296
351
  - **Smart Resolution**: Automatically uses maximum resolution supported by each model
297
352
  - **Format Support**: PNG, JPEG, GIF, WEBP, BMP, TIFF images; PDF, TXT, MD, CSV, TSV, JSON documents
298
353
  - **Office Documents**: DOCX, XLSX, PPT (with `pip install abstractcore[all]`)
299
- - **Vision Optimization**: Model-specific image processing for best vision results
354
+ - **Vision Optimization**: Model-specific image processing for vision results
300
355
 
301
356
  **Provider compatibility:**
302
357
  - **High-resolution vision**: GPT-4o (up to 4096x4096), Claude 3.5 Sonnet (up to 1568x1568)
@@ -305,16 +360,63 @@ response = llm.generate(
305
360
 
306
361
  [Learn more about Media Handling](docs/media-handling-system.md)
307
362
 
363
+ ### Glyph Visual-Text Compression (🧪 EXPERIMENTAL)
364
+
365
+ > ⚠️ **Vision Model Requirement**: This feature ONLY works with vision-capable models (e.g., gpt-4o, claude-3-5-sonnet, llama3.2-vision)
366
+
367
+ Achieve **3-4x token compression** and **faster inference** with Glyph's revolutionary visual-text compression:
368
+
369
+ ```python
370
+ from abstractcore import create_llm
371
+
372
+ # IMPORTANT: Requires a vision-capable model
373
+ llm = create_llm("ollama", model="llama3.2-vision:11b") # ✓ Vision model
374
+
375
+ # Large documents are automatically compressed for efficiency
376
+ response = llm.generate(
377
+ "Analyze the key findings in this research paper",
378
+ media=["large_research_paper.pdf"] # Automatically compressed if beneficial
379
+ )
380
+
381
+ # Force compression (raises error if model lacks vision)
382
+ response = llm.generate(
383
+ "Summarize this document",
384
+ media=["document.pdf"],
385
+ glyph_compression="always" # "auto" | "always" | "never"
386
+ )
387
+
388
+ # Non-vision models will raise UnsupportedFeatureError
389
+ # llm_no_vision = create_llm("openai", model="gpt-4") # ✗ No vision
390
+ # response = llm_no_vision.generate("...", glyph_compression="always") # Error!
391
+
392
+ # Check compression stats
393
+ if response.metadata and response.metadata.get('compression_used'):
394
+ stats = response.metadata.get('compression_stats', {})
395
+ print(f"Compression ratio: {stats.get('compression_ratio')}x")
396
+ print(f"Processing speedup: 14% faster, 79% less memory")
397
+ ```
398
+
399
+ **Validated Performance:**
400
+ - **14% faster processing** with real-world documents
401
+ - **79% lower memory usage** during processing
402
+ - **100% quality preservation** - no loss of analytical accuracy
403
+ - **Transparent operation** - works with existing code
404
+
405
+ [Learn more about Glyph Compression](docs/glyphs.md)
406
+
308
407
  ## Key Features
309
408
 
409
+ - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
310
410
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
411
+ - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, and timing for debugging and compliance
412
+ - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
311
413
  - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
312
414
  - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
313
- - **Vision Model Support**: Smart image processing at each model's maximum capability for best results
314
- - **Document Processing**: Advanced PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
415
+ - **Vision Model Support**: Smart image processing at each model's maximum capability
416
+ - **Document Processing**: PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
315
417
  - **Unified Tools**: Consistent tool calling across all providers
316
418
  - **Session Management**: Persistent conversations with metadata, analytics, and complete serialization
317
- - **Structured Responses**: Clean, predictable output formats with Pydantic
419
+ - **Native Structured Output**: Server-side schema enforcement for Ollama and LMStudio (OpenAI and Anthropic also supported)
318
420
  - **Streaming Support**: Real-time token generation for interactive experiences
319
421
  - **Consistent Token Terminology**: Unified `input_tokens`, `output_tokens`, `total_tokens` across all providers
320
422
  - **Embeddings**: Built-in support for semantic search and RAG applications
@@ -324,12 +426,12 @@ response = llm.generate(
324
426
 
325
427
  | Provider | Status | SEED Support | Setup |
326
428
  |----------|--------|-------------|-------|
327
- | **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
328
- | **Anthropic** | Full | ⚠️ Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
329
- | **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
330
- | **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
331
- | **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
332
- | **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
429
+ | **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
430
+ | **Anthropic** | Full | Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
431
+ | **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
432
+ | **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
433
+ | **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
434
+ | **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
333
435
 
334
436
  *Anthropic doesn't support seed parameters but issues a warning when provided. Use `temperature=0.0` for more consistent outputs.
335
437
 
@@ -374,7 +476,7 @@ response = client.chat.completions.create(
374
476
  - Building web applications that need HTTP API
375
477
  - Multi-language access (not just Python)
376
478
 
377
- ## Internal CLI (Optional Interactive Testing Tool)
479
+ ## AbstractCore CLI (Optional Interactive Testing Tool)
378
480
 
379
481
  AbstractCore includes a **built-in CLI** for interactive testing, development, and conversation management. This is an internal testing tool, distinct from external agentic CLIs.
380
482
 
@@ -394,6 +496,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
394
496
  - Chat history compaction and management
395
497
  - Fact extraction from conversations
396
498
  - Conversation quality evaluation (LLM-as-a-judge)
499
+ - Intent analysis and deception detection
397
500
  - Tool call testing and debugging
398
501
  - System prompt management
399
502
  - Multiple provider support
@@ -402,12 +505,13 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
402
505
  - `/compact` - Compress chat history while preserving context
403
506
  - `/facts [file]` - Extract structured facts from conversation
404
507
  - `/judge` - Evaluate conversation quality with feedback
508
+ - `/intent [participant]` - Analyze psychological intents and detect deception
405
509
  - `/history [n]` - View conversation history
406
510
  - `/stream` - Toggle real-time streaming
407
511
  - `/system [prompt]` - Show or change system prompt
408
512
  - `/status` - Show current provider, model, and capabilities
409
513
 
410
- **Full Documentation:** [Internal CLI Guide](docs/internal-cli.md)
514
+ **Full Documentation:** [AbstractCore CLI Guide](docs/acore-cli.md)
411
515
 
412
516
  **When to use the CLI:**
413
517
  - Interactive development and testing
@@ -418,7 +522,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
418
522
 
419
523
  ## Built-in Applications (Ready-to-Use CLI Tools)
420
524
 
421
- AbstractCore includes **three specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
525
+ AbstractCore includes **four specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
422
526
 
423
527
  ### Available Applications
424
528
 
@@ -427,6 +531,7 @@ AbstractCore includes **three specialized command-line applications** for common
427
531
  | **Summarizer** | Document summarization | `summarizer` |
428
532
  | **Extractor** | Entity and relationship extraction | `extractor` |
429
533
  | **Judge** | Text evaluation and scoring | `judge` |
534
+ | **Intent Analyzer** | Psychological intent analysis & deception detection | `intent` |
430
535
 
431
536
  ### Quick Usage Examples
432
537
 
@@ -445,6 +550,11 @@ extractor doc.txt --iterate 3 --mode thorough --verbose
445
550
  judge essay.txt --criteria clarity,accuracy,coherence --context "academic writing"
446
551
  judge code.py --context "code review" --format plain --verbose
447
552
  judge proposal.md --custom-criteria has_examples,covers_risks --output assessment.json
553
+
554
+ # Intent analysis with psychological insights and deception detection
555
+ intent conversation.txt --focus-participant user --depth comprehensive
556
+ intent email.txt --format plain --context document --verbose
557
+ intent chat_log.json --conversation-mode --provider lmstudio --model qwen/qwen3-30b-a3b-2507
448
558
  ```
449
559
 
450
560
  ### Installation & Setup
@@ -459,6 +569,7 @@ pip install abstractcore[all]
459
569
  summarizer --help
460
570
  extractor --help
461
571
  judge --help
572
+ intent --help
462
573
  ```
463
574
 
464
575
  ### Alternative Usage Methods
@@ -468,11 +579,13 @@ judge --help
468
579
  summarizer document.txt
469
580
  extractor report.pdf
470
581
  judge essay.md
582
+ intent conversation.txt
471
583
 
472
584
  # Method 2: Via Python module
473
585
  python -m abstractcore.apps summarizer document.txt
474
586
  python -m abstractcore.apps extractor report.pdf
475
587
  python -m abstractcore.apps judge essay.md
588
+ python -m abstractcore.apps intent conversation.txt
476
589
  ```
477
590
 
478
591
  ### Key Parameters
@@ -514,10 +627,11 @@ python -m abstractcore.apps judge essay.md
514
627
 
515
628
  ### Full Documentation
516
629
 
517
- Each application has comprehensive documentation with examples and advanced usage:
630
+ Each application has documentation with examples and usage information:
518
631
 
519
632
  - **[Summarizer Guide](docs/apps/basic-summarizer.md)** - Document summarization with multiple strategies
520
633
  - **[Extractor Guide](docs/apps/basic-extractor.md)** - Entity and relationship extraction
634
+ - **[Intent Analyzer Guide](docs/apps/basic-intent.md)** - Psychological intent analysis and deception detection
521
635
  - **[Judge Guide](docs/apps/basic-judge.md)** - Text evaluation and scoring systems
522
636
 
523
637
  **When to use the apps:**
@@ -666,7 +780,7 @@ llm = create_llm("anthropic", model="claude-3.5-sonnet")
666
780
  response = llm.generate(analysis_prompt, media=documents)
667
781
 
668
782
  # Automatic format handling:
669
- # - PDF: Advanced text extraction with PyMuPDF4LLM
783
+ # - PDF: Text extraction with PyMuPDF4LLM
670
784
  # - Excel: Table parsing with pandas
671
785
  # - PowerPoint: Slide content extraction with unstructured
672
786
  ```
@@ -719,6 +833,8 @@ review = llm.generate(
719
833
  print(f"{review.title}: {review.rating}/5")
720
834
  ```
721
835
 
836
+ [Learn more about Structured Output](docs/structured-output.md)
837
+
722
838
  ### 7. Universal API Server
723
839
 
724
840
  ```bash
@@ -736,6 +852,7 @@ curl -X POST http://localhost:8000/v1/chat/completions \
736
852
 
737
853
  ## Why AbstractCore?
738
854
 
855
+ - **Offline-First Philosophy**: Designed for open source LLMs with complete offline operation. No internet required after initial model download
739
856
  - **Unified Interface**: One API for all LLM providers
740
857
  - **Multimodal Support**: Upload images, PDFs, and documents across all providers
741
858
  - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
@@ -774,7 +891,7 @@ pip install abstractcore[all]
774
891
 
775
892
  **Media processing extras:**
776
893
  ```bash
777
- # For advanced PDF processing
894
+ # For PDF processing
778
895
  pip install pymupdf4llm
779
896
 
780
897
  # For Office documents (DOCX, XLSX, PPT)
@@ -156,9 +156,58 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
156
156
 
157
157
  [Learn more about Session](docs/session.md)
158
158
 
159
+ ### Interaction Tracing (Observability)
160
+
161
+ Enable complete observability of LLM interactions for debugging, compliance, and transparency:
162
+
163
+ ```python
164
+ from abstractcore import create_llm
165
+ from abstractcore.core.session import BasicSession
166
+ from abstractcore.utils import export_traces
167
+
168
+ # Enable tracing on provider
169
+ llm = create_llm('openai', model='gpt-4o-mini', enable_tracing=True, max_traces=100)
170
+
171
+ # Or on session for automatic correlation
172
+ session = BasicSession(provider=llm, enable_tracing=True)
173
+
174
+ # Generate with custom metadata
175
+ response = session.generate(
176
+ "Write Python code",
177
+ step_type='code_generation',
178
+ attempt_number=1
179
+ )
180
+
181
+ # Access complete trace
182
+ trace_id = response.metadata['trace_id']
183
+ trace = llm.get_traces(trace_id=trace_id)
184
+
185
+ # Full interaction context
186
+ print(f"Prompt: {trace['prompt']}")
187
+ print(f"Response: {trace['response']['content']}")
188
+ print(f"Tokens: {trace['response']['usage']['total_tokens']}")
189
+ print(f"Time: {trace['response']['generation_time_ms']}ms")
190
+ print(f"Custom metadata: {trace['metadata']}")
191
+
192
+ # Get all session traces
193
+ traces = session.get_interaction_history()
194
+
195
+ # Export to JSONL, JSON, or Markdown
196
+ export_traces(traces, format='markdown', file_path='workflow_trace.md')
197
+ ```
198
+
199
+ **What's captured:**
200
+ - All prompts, system prompts, and conversation history
201
+ - Complete responses with token usage and timing
202
+ - Generation parameters (temperature, tokens, seed, etc.)
203
+ - Custom metadata for workflow tracking
204
+ - Tool calls and results
205
+
206
+ [Learn more about Interaction Tracing](docs/interaction-tracing.md)
207
+
159
208
  ### Media Handling
160
209
 
161
- AbstractCore provides **unified media handling** across all providers with **automatic maximum resolution optimization** for best results. Upload images, PDFs, and documents using the same simple API regardless of your provider.
210
+ AbstractCore provides unified media handling across all providers with automatic resolution optimization. Upload images, PDFs, and documents using the same simple API regardless of your provider.
162
211
 
163
212
  ```python
164
213
  from abstractcore import create_llm
@@ -196,7 +245,7 @@ response = llm.generate(
196
245
  - **Smart Resolution**: Automatically uses maximum resolution supported by each model
197
246
  - **Format Support**: PNG, JPEG, GIF, WEBP, BMP, TIFF images; PDF, TXT, MD, CSV, TSV, JSON documents
198
247
  - **Office Documents**: DOCX, XLSX, PPT (with `pip install abstractcore[all]`)
199
- - **Vision Optimization**: Model-specific image processing for best vision results
248
+ - **Vision Optimization**: Model-specific image processing for vision results
200
249
 
201
250
  **Provider compatibility:**
202
251
  - **High-resolution vision**: GPT-4o (up to 4096x4096), Claude 3.5 Sonnet (up to 1568x1568)
@@ -205,16 +254,63 @@ response = llm.generate(
205
254
 
206
255
  [Learn more about Media Handling](docs/media-handling-system.md)
207
256
 
257
+ ### Glyph Visual-Text Compression (🧪 EXPERIMENTAL)
258
+
259
+ > ⚠️ **Vision Model Requirement**: This feature ONLY works with vision-capable models (e.g., gpt-4o, claude-3-5-sonnet, llama3.2-vision)
260
+
261
+ Achieve **3-4x token compression** and **faster inference** with Glyph's revolutionary visual-text compression:
262
+
263
+ ```python
264
+ from abstractcore import create_llm
265
+
266
+ # IMPORTANT: Requires a vision-capable model
267
+ llm = create_llm("ollama", model="llama3.2-vision:11b") # ✓ Vision model
268
+
269
+ # Large documents are automatically compressed for efficiency
270
+ response = llm.generate(
271
+ "Analyze the key findings in this research paper",
272
+ media=["large_research_paper.pdf"] # Automatically compressed if beneficial
273
+ )
274
+
275
+ # Force compression (raises error if model lacks vision)
276
+ response = llm.generate(
277
+ "Summarize this document",
278
+ media=["document.pdf"],
279
+ glyph_compression="always" # "auto" | "always" | "never"
280
+ )
281
+
282
+ # Non-vision models will raise UnsupportedFeatureError
283
+ # llm_no_vision = create_llm("openai", model="gpt-4") # ✗ No vision
284
+ # response = llm_no_vision.generate("...", glyph_compression="always") # Error!
285
+
286
+ # Check compression stats
287
+ if response.metadata and response.metadata.get('compression_used'):
288
+ stats = response.metadata.get('compression_stats', {})
289
+ print(f"Compression ratio: {stats.get('compression_ratio')}x")
290
+ print(f"Processing speedup: 14% faster, 79% less memory")
291
+ ```
292
+
293
+ **Validated Performance:**
294
+ - **14% faster processing** with real-world documents
295
+ - **79% lower memory usage** during processing
296
+ - **100% quality preservation** - no loss of analytical accuracy
297
+ - **Transparent operation** - works with existing code
298
+
299
+ [Learn more about Glyph Compression](docs/glyphs.md)
300
+
208
301
  ## Key Features
209
302
 
303
+ - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
210
304
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
305
+ - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, and timing for debugging and compliance
306
+ - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
211
307
  - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
212
308
  - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
213
- - **Vision Model Support**: Smart image processing at each model's maximum capability for best results
214
- - **Document Processing**: Advanced PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
309
+ - **Vision Model Support**: Smart image processing at each model's maximum capability
310
+ - **Document Processing**: PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
215
311
  - **Unified Tools**: Consistent tool calling across all providers
216
312
  - **Session Management**: Persistent conversations with metadata, analytics, and complete serialization
217
- - **Structured Responses**: Clean, predictable output formats with Pydantic
313
+ - **Native Structured Output**: Server-side schema enforcement for Ollama and LMStudio (OpenAI and Anthropic also supported)
218
314
  - **Streaming Support**: Real-time token generation for interactive experiences
219
315
  - **Consistent Token Terminology**: Unified `input_tokens`, `output_tokens`, `total_tokens` across all providers
220
316
  - **Embeddings**: Built-in support for semantic search and RAG applications
@@ -224,12 +320,12 @@ response = llm.generate(
224
320
 
225
321
  | Provider | Status | SEED Support | Setup |
226
322
  |----------|--------|-------------|-------|
227
- | **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
228
- | **Anthropic** | Full | ⚠️ Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
229
- | **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
230
- | **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
231
- | **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
232
- | **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
323
+ | **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
324
+ | **Anthropic** | Full | Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
325
+ | **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
326
+ | **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
327
+ | **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
328
+ | **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
233
329
 
234
330
  *Anthropic doesn't support seed parameters but issues a warning when provided. Use `temperature=0.0` for more consistent outputs.
235
331
 
@@ -274,7 +370,7 @@ response = client.chat.completions.create(
274
370
  - Building web applications that need HTTP API
275
371
  - Multi-language access (not just Python)
276
372
 
277
- ## Internal CLI (Optional Interactive Testing Tool)
373
+ ## AbstractCore CLI (Optional Interactive Testing Tool)
278
374
 
279
375
  AbstractCore includes a **built-in CLI** for interactive testing, development, and conversation management. This is an internal testing tool, distinct from external agentic CLIs.
280
376
 
@@ -294,6 +390,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
294
390
  - Chat history compaction and management
295
391
  - Fact extraction from conversations
296
392
  - Conversation quality evaluation (LLM-as-a-judge)
393
+ - Intent analysis and deception detection
297
394
  - Tool call testing and debugging
298
395
  - System prompt management
299
396
  - Multiple provider support
@@ -302,12 +399,13 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
302
399
  - `/compact` - Compress chat history while preserving context
303
400
  - `/facts [file]` - Extract structured facts from conversation
304
401
  - `/judge` - Evaluate conversation quality with feedback
402
+ - `/intent [participant]` - Analyze psychological intents and detect deception
305
403
  - `/history [n]` - View conversation history
306
404
  - `/stream` - Toggle real-time streaming
307
405
  - `/system [prompt]` - Show or change system prompt
308
406
  - `/status` - Show current provider, model, and capabilities
309
407
 
310
- **Full Documentation:** [Internal CLI Guide](docs/internal-cli.md)
408
+ **Full Documentation:** [AbstractCore CLI Guide](docs/acore-cli.md)
311
409
 
312
410
  **When to use the CLI:**
313
411
  - Interactive development and testing
@@ -318,7 +416,7 @@ python -m abstractcore.utils.cli --provider anthropic --model claude-3-5-haiku-l
318
416
 
319
417
  ## Built-in Applications (Ready-to-Use CLI Tools)
320
418
 
321
- AbstractCore includes **three specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
419
+ AbstractCore includes **four specialized command-line applications** for common LLM tasks. These are production-ready tools that can be used directly from the terminal without any Python programming.
322
420
 
323
421
  ### Available Applications
324
422
 
@@ -327,6 +425,7 @@ AbstractCore includes **three specialized command-line applications** for common
327
425
  | **Summarizer** | Document summarization | `summarizer` |
328
426
  | **Extractor** | Entity and relationship extraction | `extractor` |
329
427
  | **Judge** | Text evaluation and scoring | `judge` |
428
+ | **Intent Analyzer** | Psychological intent analysis & deception detection | `intent` |
330
429
 
331
430
  ### Quick Usage Examples
332
431
 
@@ -345,6 +444,11 @@ extractor doc.txt --iterate 3 --mode thorough --verbose
345
444
  judge essay.txt --criteria clarity,accuracy,coherence --context "academic writing"
346
445
  judge code.py --context "code review" --format plain --verbose
347
446
  judge proposal.md --custom-criteria has_examples,covers_risks --output assessment.json
447
+
448
+ # Intent analysis with psychological insights and deception detection
449
+ intent conversation.txt --focus-participant user --depth comprehensive
450
+ intent email.txt --format plain --context document --verbose
451
+ intent chat_log.json --conversation-mode --provider lmstudio --model qwen/qwen3-30b-a3b-2507
348
452
  ```
349
453
 
350
454
  ### Installation & Setup
@@ -359,6 +463,7 @@ pip install abstractcore[all]
359
463
  summarizer --help
360
464
  extractor --help
361
465
  judge --help
466
+ intent --help
362
467
  ```
363
468
 
364
469
  ### Alternative Usage Methods
@@ -368,11 +473,13 @@ judge --help
368
473
  summarizer document.txt
369
474
  extractor report.pdf
370
475
  judge essay.md
476
+ intent conversation.txt
371
477
 
372
478
  # Method 2: Via Python module
373
479
  python -m abstractcore.apps summarizer document.txt
374
480
  python -m abstractcore.apps extractor report.pdf
375
481
  python -m abstractcore.apps judge essay.md
482
+ python -m abstractcore.apps intent conversation.txt
376
483
  ```
377
484
 
378
485
  ### Key Parameters
@@ -414,10 +521,11 @@ python -m abstractcore.apps judge essay.md
414
521
 
415
522
  ### Full Documentation
416
523
 
417
- Each application has comprehensive documentation with examples and advanced usage:
524
+ Each application has documentation with examples and usage information:
418
525
 
419
526
  - **[Summarizer Guide](docs/apps/basic-summarizer.md)** - Document summarization with multiple strategies
420
527
  - **[Extractor Guide](docs/apps/basic-extractor.md)** - Entity and relationship extraction
528
+ - **[Intent Analyzer Guide](docs/apps/basic-intent.md)** - Psychological intent analysis and deception detection
421
529
  - **[Judge Guide](docs/apps/basic-judge.md)** - Text evaluation and scoring systems
422
530
 
423
531
  **When to use the apps:**
@@ -566,7 +674,7 @@ llm = create_llm("anthropic", model="claude-3.5-sonnet")
566
674
  response = llm.generate(analysis_prompt, media=documents)
567
675
 
568
676
  # Automatic format handling:
569
- # - PDF: Advanced text extraction with PyMuPDF4LLM
677
+ # - PDF: Text extraction with PyMuPDF4LLM
570
678
  # - Excel: Table parsing with pandas
571
679
  # - PowerPoint: Slide content extraction with unstructured
572
680
  ```
@@ -619,6 +727,8 @@ review = llm.generate(
619
727
  print(f"{review.title}: {review.rating}/5")
620
728
  ```
621
729
 
730
+ [Learn more about Structured Output](docs/structured-output.md)
731
+
622
732
  ### 7. Universal API Server
623
733
 
624
734
  ```bash
@@ -636,6 +746,7 @@ curl -X POST http://localhost:8000/v1/chat/completions \
636
746
 
637
747
  ## Why AbstractCore?
638
748
 
749
+ - **Offline-First Philosophy**: Designed for open source LLMs with complete offline operation. No internet required after initial model download
639
750
  - **Unified Interface**: One API for all LLM providers
640
751
  - **Multimodal Support**: Upload images, PDFs, and documents across all providers
641
752
  - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
@@ -674,7 +785,7 @@ pip install abstractcore[all]
674
785
 
675
786
  **Media processing extras:**
676
787
  ```bash
677
- # For advanced PDF processing
788
+ # For PDF processing
678
789
  pip install pymupdf4llm
679
790
 
680
791
  # For Office documents (DOCX, XLSX, PPT)
@@ -2,6 +2,8 @@
2
2
  """
3
3
  AbstractCore - Unified interface to all LLM providers with essential infrastructure.
4
4
 
5
+ CRITICAL: Offline-first design - enforces offline mode for open source LLMs by default.
6
+
5
7
  Key Features:
6
8
  • Multi-provider support (OpenAI, Anthropic, Ollama, HuggingFace, MLX, LMStudio)
7
9
  • Unified token parameter vocabulary across all providers
@@ -47,6 +49,13 @@ _has_processing = True
47
49
  # Tools module (core functionality)
48
50
  from .tools import tool
49
51
 
52
+ # Compression module (optional import)
53
+ try:
54
+ from .compression import GlyphConfig, CompressionOrchestrator
55
+ _has_compression = True
56
+ except ImportError:
57
+ _has_compression = False
58
+
50
59
  __all__ = [
51
60
  'create_llm',
52
61
  'BasicSession',
@@ -64,5 +73,8 @@ __all__ = [
64
73
  if _has_embeddings:
65
74
  __all__.append('EmbeddingManager')
66
75
 
76
+ if _has_compression:
77
+ __all__.extend(['GlyphConfig', 'CompressionOrchestrator'])
78
+
67
79
  # Processing is core functionality
68
80
  __all__.extend(['BasicSummarizer', 'SummaryStyle', 'SummaryLength', 'BasicExtractor'])
@@ -9,11 +9,13 @@ Available apps:
9
9
  summarizer - Document summarization tool
10
10
  extractor - Entity and relationship extraction tool
11
11
  judge - Text evaluation and scoring tool
12
+ intent - Intent analysis and motivation identification tool
12
13
 
13
14
  Examples:
14
15
  python -m abstractcore.apps summarizer document.txt
15
16
  python -m abstractcore.apps extractor report.txt --format json-ld
16
17
  python -m abstractcore.apps judge essay.txt --criteria clarity,accuracy
18
+ python -m abstractcore.apps intent "I need help with this problem" --depth comprehensive
17
19
  python -m abstractcore.apps <app> --help
18
20
  """
19
21
 
@@ -43,9 +45,14 @@ def main():
43
45
  sys.argv = [sys.argv[0]] + sys.argv[2:]
44
46
  from .judge import main as judge_main
45
47
  judge_main()
48
+ elif app_name == "intent":
49
+ # Remove the app name from sys.argv and run intent analyzer
50
+ sys.argv = [sys.argv[0]] + sys.argv[2:]
51
+ from .intent import main as intent_main
52
+ intent_main()
46
53
  else:
47
54
  print(f"Unknown app: {app_name}")
48
- print("\nAvailable apps: summarizer, extractor, judge")
55
+ print("\nAvailable apps: summarizer, extractor, judge, intent")
49
56
  sys.exit(1)
50
57
 
51
58
  if __name__ == "__main__":