abstractcore 2.4.1__tar.gz → 2.4.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153) hide show
  1. {abstractcore-2.4.1 → abstractcore-2.4.3}/PKG-INFO +214 -20
  2. {abstractcore-2.4.1 → abstractcore-2.4.3}/README.md +207 -18
  3. abstractcore-2.4.3/abstractcore/apps/app_config_utils.py +19 -0
  4. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/apps/summarizer.py +85 -56
  5. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/architectures/detection.py +15 -4
  6. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/assets/architecture_formats.json +1 -1
  7. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/assets/model_capabilities.json +420 -11
  8. abstractcore-2.4.3/abstractcore/core/factory.py +73 -0
  9. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/interface.py +2 -0
  10. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/session.py +4 -0
  11. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/embeddings/manager.py +54 -16
  12. abstractcore-2.4.3/abstractcore/media/__init__.py +119 -0
  13. abstractcore-2.4.3/abstractcore/media/auto_handler.py +363 -0
  14. abstractcore-2.4.3/abstractcore/media/base.py +456 -0
  15. abstractcore-2.4.3/abstractcore/media/capabilities.py +335 -0
  16. abstractcore-2.4.3/abstractcore/media/types.py +300 -0
  17. abstractcore-2.4.3/abstractcore/media/vision_fallback.py +260 -0
  18. abstractcore-2.4.3/abstractcore/providers/__init__.py +48 -0
  19. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/anthropic_provider.py +18 -1
  20. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/base.py +90 -0
  21. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/huggingface_provider.py +94 -4
  22. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/lmstudio_provider.py +88 -5
  23. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/mlx_provider.py +33 -1
  24. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/ollama_provider.py +37 -3
  25. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/openai_provider.py +18 -1
  26. abstractcore-2.4.3/abstractcore/providers/registry.py +406 -0
  27. abstractcore-2.4.3/abstractcore/server/app.py +2382 -0
  28. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/common_tools.py +12 -8
  29. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/__init__.py +9 -5
  30. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/cli.py +199 -17
  31. abstractcore-2.4.3/abstractcore/utils/message_preprocessor.py +182 -0
  32. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/structured_logging.py +117 -16
  33. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/version.py +1 -1
  34. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/PKG-INFO +214 -20
  35. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/SOURCES.txt +28 -0
  36. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/entry_points.txt +1 -0
  37. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/requires.txt +8 -2
  38. {abstractcore-2.4.1 → abstractcore-2.4.3}/pyproject.toml +10 -2
  39. abstractcore-2.4.3/tests/test_cli_media.py +45 -0
  40. abstractcore-2.4.3/tests/test_consistency.py +116 -0
  41. abstractcore-2.4.3/tests/test_debug_server.py +69 -0
  42. abstractcore-2.4.3/tests/test_direct_vs_server.py +157 -0
  43. abstractcore-2.4.3/tests/test_enhanced_prompt.py +124 -0
  44. abstractcore-2.4.3/tests/test_final_accuracy.py +138 -0
  45. abstractcore-2.4.3/tests/test_fixed_media.py +168 -0
  46. abstractcore-2.4.3/tests/test_fixed_prompt.py +65 -0
  47. abstractcore-2.4.3/tests/test_import_debug.py +107 -0
  48. abstractcore-2.4.3/tests/test_lmstudio_context.py +143 -0
  49. abstractcore-2.4.3/tests/test_media_import.py +65 -0
  50. abstractcore-2.4.3/tests/test_media_server.py +276 -0
  51. abstractcore-2.4.3/tests/test_openai_media_integration.py +444 -0
  52. abstractcore-2.4.3/tests/test_sensory_prompting.py +110 -0
  53. abstractcore-2.4.3/tests/test_server_debug.py +131 -0
  54. abstractcore-2.4.3/tests/test_server_integration.py +245 -0
  55. abstractcore-2.4.3/tests/test_text_only_model_experience.py +138 -0
  56. abstractcore-2.4.3/tests/test_vision_accuracy.py +145 -0
  57. abstractcore-2.4.3/tests/test_vision_comprehensive.py +648 -0
  58. abstractcore-2.4.3/tests/test_vision_fallback_improvement.py +129 -0
  59. abstractcore-2.4.1/abstractcore/core/factory.py +0 -122
  60. abstractcore-2.4.1/abstractcore/media/__init__.py +0 -151
  61. abstractcore-2.4.1/abstractcore/providers/__init__.py +0 -21
  62. abstractcore-2.4.1/abstractcore/server/app.py +0 -1124
  63. {abstractcore-2.4.1 → abstractcore-2.4.3}/LICENSE +0 -0
  64. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/__init__.py +0 -0
  65. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/apps/__init__.py +0 -0
  66. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/apps/__main__.py +0 -0
  67. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/apps/extractor.py +0 -0
  68. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/apps/judge.py +0 -0
  69. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/architectures/__init__.py +0 -0
  70. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/architectures/enums.py +0 -0
  71. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/assets/session_schema.json +0 -0
  72. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/__init__.py +0 -0
  73. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/enums.py +0 -0
  74. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/retry.py +0 -0
  75. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/core/types.py +0 -0
  76. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/embeddings/__init__.py +0 -0
  77. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/embeddings/models.py +0 -0
  78. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/events/__init__.py +0 -0
  79. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/exceptions/__init__.py +0 -0
  80. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/processing/__init__.py +0 -0
  81. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/processing/basic_extractor.py +0 -0
  82. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/processing/basic_judge.py +0 -0
  83. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/processing/basic_summarizer.py +0 -0
  84. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/mock_provider.py +0 -0
  85. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/providers/streaming.py +0 -0
  86. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/server/__init__.py +0 -0
  87. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/structured/__init__.py +0 -0
  88. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/structured/handler.py +0 -0
  89. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/structured/retry.py +0 -0
  90. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/__init__.py +0 -0
  91. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/core.py +0 -0
  92. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/handler.py +0 -0
  93. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/parser.py +0 -0
  94. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/registry.py +0 -0
  95. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/syntax_rewriter.py +0 -0
  96. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/tools/tag_rewriter.py +0 -0
  97. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/self_fixes.py +0 -0
  98. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore/utils/token_utils.py +0 -0
  99. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/dependency_links.txt +0 -0
  100. {abstractcore-2.4.1 → abstractcore-2.4.3}/abstractcore.egg-info/top_level.txt +0 -0
  101. {abstractcore-2.4.1 → abstractcore-2.4.3}/setup.cfg +0 -0
  102. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_agentic_cli_compatibility.py +0 -0
  103. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_all_specified_providers.py +0 -0
  104. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_basic_session.py +0 -0
  105. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_basic_summarizer.py +0 -0
  106. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_complete_integration.py +0 -0
  107. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_comprehensive_events.py +0 -0
  108. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_core_components.py +0 -0
  109. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_critical_streaming_tool_fix.py +0 -0
  110. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings.py +0 -0
  111. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_integration.py +0 -0
  112. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_llm_integration.py +0 -0
  113. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_matrix_operations.py +0 -0
  114. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_no_mock.py +0 -0
  115. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_real.py +0 -0
  116. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_semantic_validation.py +0 -0
  117. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_embeddings_simple.py +0 -0
  118. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_environment_variable_tool_call_tags.py +0 -0
  119. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_factory.py +0 -0
  120. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_final_comprehensive.py +0 -0
  121. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_final_graceful_errors.py +0 -0
  122. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_graceful_fallback.py +0 -0
  123. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_integrated_functionality.py +0 -0
  124. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_ollama_tool_role_fix.py +0 -0
  125. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_openai_conversion_manual.py +0 -0
  126. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_openai_format_bug.py +0 -0
  127. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_openai_format_conversion.py +0 -0
  128. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_progressive_complexity.py +0 -0
  129. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_basic_session.py +0 -0
  130. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_connectivity.py +0 -0
  131. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_simple_generation.py +0 -0
  132. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_streaming.py +0 -0
  133. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_token_translation.py +0 -0
  134. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_provider_tool_detection.py +0 -0
  135. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_providers.py +0 -0
  136. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_providers_comprehensive.py +0 -0
  137. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_providers_simple.py +0 -0
  138. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_real_models_comprehensive.py +0 -0
  139. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_retry_observability.py +0 -0
  140. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_retry_strategy.py +0 -0
  141. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_server_embeddings_real.py +0 -0
  142. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_stream_tool_calling.py +0 -0
  143. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_streaming_enhancements.py +0 -0
  144. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_streaming_tag_rewriting.py +0 -0
  145. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_structured_integration.py +0 -0
  146. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_structured_output.py +0 -0
  147. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_syntax_rewriter.py +0 -0
  148. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_tool_calling.py +0 -0
  149. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_tool_execution_separation.py +0 -0
  150. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_unified_streaming.py +0 -0
  151. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_unload_memory.py +0 -0
  152. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_user_scenario_validation.py +0 -0
  153. {abstractcore-2.4.1 → abstractcore-2.4.3}/tests/test_wrong_model_fallback.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: abstractcore
3
- Version: 2.4.1
3
+ Version: 2.4.3
4
4
  Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
5
5
  Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
6
6
  Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -46,6 +46,11 @@ Provides-Extra: embeddings
46
46
  Requires-Dist: sentence-transformers<4.0.0,>=2.7.0; extra == "embeddings"
47
47
  Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
48
48
  Provides-Extra: processing
49
+ Provides-Extra: media
50
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "media"
51
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "media"
52
+ Requires-Dist: unstructured[office]<1.0.0,>=0.10.0; extra == "media"
53
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "media"
49
54
  Provides-Extra: api-providers
50
55
  Requires-Dist: abstractcore[anthropic,openai]; extra == "api-providers"
51
56
  Provides-Extra: local-providers
@@ -55,9 +60,9 @@ Requires-Dist: abstractcore[huggingface]; extra == "heavy-providers"
55
60
  Provides-Extra: all-providers
56
61
  Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers"
57
62
  Provides-Extra: all
58
- Requires-Dist: abstractcore[anthropic,dev,docs,embeddings,huggingface,lmstudio,mlx,ollama,openai,processing,server,test]; extra == "all"
63
+ Requires-Dist: abstractcore[anthropic,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test]; extra == "all"
59
64
  Provides-Extra: lightweight
60
- Requires-Dist: abstractcore[anthropic,embeddings,lmstudio,ollama,openai,processing,server]; extra == "lightweight"
65
+ Requires-Dist: abstractcore[anthropic,embeddings,lmstudio,media,ollama,openai,processing,server]; extra == "lightweight"
61
66
  Provides-Extra: dev
62
67
  Requires-Dist: pytest>=7.0.0; extra == "dev"
63
68
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
@@ -89,6 +94,11 @@ Dynamic: license-file
89
94
 
90
95
  # AbstractCore
91
96
 
97
+ [![PyPI version](https://img.shields.io/pypi/v/abstractcore.svg)](https://pypi.org/project/abstractcore/)
98
+ [![Python Version](https://img.shields.io/pypi/pyversions/abstractcore)](https://pypi.org/project/abstractcore/)
99
+ [![license](https://img.shields.io/github/license/lpalbou/abstractcore)](https://github.com/lpalbou/abstractcore/blob/main/LICENSE)
100
+ [![GitHub stars](https://img.shields.io/github/stars/lpalbou/abstractcore?style=social)](https://github.com/lpalbou/abstractcore/stargazers)
101
+
92
102
  A unified Python library for interaction with multiple Large Language Model (LLM) providers.
93
103
 
94
104
  **Write once, run everywhere.**
@@ -153,15 +163,68 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
153
163
 
154
164
  [Learn more about Session](docs/session.md)
155
165
 
166
+ ### Media Handling
167
+
168
+ AbstractCore provides **unified media handling** across all providers with **automatic maximum resolution optimization** for best results. Upload images, PDFs, and documents using the same simple API regardless of your provider.
169
+
170
+ ```python
171
+ from abstractcore import create_llm
172
+
173
+ # Vision analysis - works with any vision model
174
+ # Images automatically processed at maximum supported resolution
175
+ llm = create_llm("openai", model="gpt-4o")
176
+ response = llm.generate(
177
+ "What's in this image?",
178
+ media=["photo.jpg"] # Auto-resized to model's maximum capability
179
+ )
180
+
181
+ # Document analysis - works with any model
182
+ llm = create_llm("anthropic", model="claude-3.5-sonnet")
183
+ response = llm.generate(
184
+ "Summarize this research paper",
185
+ media=["research_paper.pdf"]
186
+ )
187
+
188
+ # Multiple files - mix images, PDFs, spreadsheets
189
+ response = llm.generate(
190
+ "Analyze these business documents",
191
+ media=["report.pdf", "chart.png", "data.xlsx"]
192
+ )
193
+
194
+ # Same code works with local models
195
+ llm = create_llm("ollama", model="qwen3-vl:8b")
196
+ response = llm.generate(
197
+ "Describe this screenshot",
198
+ media=["screenshot.png"] # Auto-optimized for qwen3-vl
199
+ )
200
+ ```
201
+
202
+ **Key Features:**
203
+ - **Smart Resolution**: Automatically uses maximum resolution supported by each model
204
+ - **Format Support**: PNG, JPEG, GIF, WEBP, BMP, TIFF images; PDF, TXT, MD, CSV, TSV, JSON documents
205
+ - **Office Documents**: DOCX, XLSX, PPT (with `pip install abstractcore[media]`)
206
+ - **Vision Optimization**: Model-specific image processing for best vision results
207
+
208
+ **Provider compatibility:**
209
+ - **High-resolution vision**: GPT-4o (up to 4096x4096), Claude 3.5 Sonnet (up to 1568x1568)
210
+ - **Local models**: qwen3-vl (up to 3584x3584), gemma3:4b, llama3.2-vision
211
+ - **All models**: Automatic text extraction for non-vision models
212
+
213
+ [Learn more about Media Handling](docs/media-handling-system.md)
214
+
156
215
  ## Key Features
157
216
 
158
217
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
218
+ - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
219
+ - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
220
+ - **Vision Model Support**: Smart image processing at each model's maximum capability for best results
221
+ - **Document Processing**: Advanced PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
159
222
  - **Unified Tools**: Consistent tool calling across all providers
160
223
  - **Session Management**: Persistent conversations with metadata, analytics, and complete serialization
161
224
  - **Structured Responses**: Clean, predictable output formats with Pydantic
162
225
  - **Streaming Support**: Real-time token generation for interactive experiences
163
226
  - **Embeddings**: Built-in support for semantic search and RAG applications
164
- - **Universal Server**: Optional OpenAI-compatible API server
227
+ - **Universal Server**: Optional OpenAI-compatible API server with `/v1/responses` endpoint
165
228
 
166
229
  ## Supported Providers
167
230
 
@@ -199,12 +262,15 @@ response = client.chat.completions.create(
199
262
  ```
200
263
 
201
264
  **Server Features:**
202
- - OpenAI-compatible REST endpoints (`/v1/chat/completions`, `/v1/embeddings`, etc.)
265
+ - OpenAI-compatible REST endpoints (`/v1/chat/completions`, `/v1/embeddings`, `/v1/responses`)
266
+ - **NEW in v2.4.3**: OpenAI Responses API (`/v1/responses`) with native `input_file` support
203
267
  - Multi-provider support through one HTTP API
268
+ - Comprehensive media processing (images, PDFs, Office documents, CSV/TSV)
204
269
  - Agentic CLI integration (Codex, Crush, Gemini CLI)
205
- - Streaming responses
270
+ - Streaming responses (opt-in)
206
271
  - Tool call format conversion
207
- - Interactive API docs at `/docs`
272
+ - Enhanced debug logging with `--debug` flag
273
+ - Interactive API docs at `/docs` (Swagger UI)
208
274
 
209
275
  **When to use the server:**
210
276
  - Integrating with existing OpenAI-compatible tools
@@ -365,6 +431,72 @@ Each application has comprehensive documentation with examples and advanced usag
365
431
  - Integration with shell scripts and automation
366
432
  - Standardized text processing tasks
367
433
 
434
+ ## Configuration
435
+
436
+ AbstractCore provides a **centralized configuration system** that manages default models, cache directories, and logging settings from a single location. This eliminates the need to specify `--provider` and `--model` parameters repeatedly.
437
+
438
+ ### Quick Setup
439
+
440
+ ```bash
441
+ # Check current configuration (shows how to change each setting)
442
+ abstractcore --status
443
+
444
+ # Set defaults for all applications
445
+ abstractcore --set-global-default ollama/llama3:8b
446
+
447
+ # Or configure specific applications (examples of customization)
448
+ abstractcore --set-app-default summarizer openai gpt-4o-mini
449
+ abstractcore --set-app-default extractor ollama qwen3:4b-instruct
450
+ abstractcore --set-app-default judge anthropic claude-3-5-haiku
451
+
452
+ # Configure logging (common examples)
453
+ abstractcore --set-console-log-level WARNING # Reduce console output
454
+ abstractcore --set-console-log-level NONE # Disable console logging
455
+ abstractcore --enable-file-logging # Save logs to files
456
+ abstractcore --enable-debug-logging # Full debug mode
457
+
458
+ # Set API keys as needed
459
+ abstractcore --set-api-key openai sk-your-key-here
460
+ abstractcore --set-api-key anthropic your-anthropic-key
461
+
462
+ # Verify configuration (includes change commands for each setting)
463
+ abstractcore --status
464
+ ```
465
+
466
+ ### Priority System
467
+
468
+ AbstractCore uses a clear priority system where explicit parameters always override defaults:
469
+
470
+ 1. **Explicit parameters** (highest priority): `summarizer doc.txt --provider openai --model gpt-4o-mini`
471
+ 2. **App-specific config**: `abstractcore --set-app-default summarizer openai gpt-4o-mini`
472
+ 3. **Global config**: `abstractcore --set-global-default openai/gpt-4o-mini`
473
+ 4. **Built-in defaults** (lowest priority): `huggingface/unsloth/Qwen3-4B-Instruct-2507-GGUF`
474
+
475
+ ### Usage After Configuration
476
+
477
+ Once configured, apps use your defaults automatically:
478
+
479
+ ```bash
480
+ # Before configuration (requires explicit parameters)
481
+ summarizer document.pdf --provider openai --model gpt-4o-mini
482
+
483
+ # After configuration (uses configured defaults)
484
+ summarizer document.pdf
485
+
486
+ # Explicit parameters still override when needed
487
+ summarizer document.pdf --provider anthropic --model claude-3-5-sonnet
488
+ ```
489
+
490
+ ### Configuration Features
491
+
492
+ - **Application defaults**: Different optimal models for each app
493
+ - **Cache directories**: Configurable cache locations for models and data
494
+ - **Logging control**: Package-wide logging levels and debug mode
495
+ - **API key management**: Centralized API key storage
496
+ - **Interactive setup**: `abstractcore --configure` for guided configuration
497
+
498
+ **Complete guide**: [Centralized Configuration](docs/centralized-config.md)
499
+
368
500
  ## Documentation
369
501
 
370
502
  **📚 Complete Documentation:** [docs/](docs/) - Full documentation index and navigation guide
@@ -376,6 +508,7 @@ Each application has comprehensive documentation with examples and advanced usag
376
508
 
377
509
  ### Core Library (Python)
378
510
  - **[Python API Reference](docs/api-reference.md)** - Complete Python API documentation
511
+ - **[Media Handling System](docs/media-handling-system.md)** - Images, PDFs, and document processing across all providers
379
512
  - **[Session Management](docs/session.md)** - Persistent conversations, serialization, and analytics
380
513
  - **[Embeddings Guide](docs/embeddings.md)** - Semantic search, RAG, and vector embeddings
381
514
  - **[Code Examples](examples/)** - Working examples for all features
@@ -401,7 +534,44 @@ for provider in providers:
401
534
  response = llm.generate("Hello!")
402
535
  ```
403
536
 
404
- ### 2. Local Development, Cloud Production
537
+ ### 2. Vision Analysis Across Providers
538
+
539
+ ```python
540
+ # Same image analysis works with any vision model
541
+ image_files = ["product_photo.jpg", "user_feedback.png"]
542
+ prompt = "Analyze these product images and suggest improvements"
543
+
544
+ # OpenAI GPT-4o
545
+ openai_llm = create_llm("openai", model="gpt-4o")
546
+ openai_analysis = openai_llm.generate(prompt, media=image_files)
547
+
548
+ # Anthropic Claude
549
+ claude_llm = create_llm("anthropic", model="claude-3.5-sonnet")
550
+ claude_analysis = claude_llm.generate(prompt, media=image_files)
551
+
552
+ # Local model (free)
553
+ local_llm = create_llm("ollama", model="qwen3-vl:8b")
554
+ local_analysis = local_llm.generate(prompt, media=image_files)
555
+ ```
556
+
557
+ ### 3. Document Processing Pipeline
558
+
559
+ ```python
560
+ # Universal document analysis
561
+ documents = ["contract.pdf", "financial_data.xlsx", "presentation.ppt"]
562
+ analysis_prompt = "Extract key information and identify potential risks"
563
+
564
+ # Works with any provider
565
+ llm = create_llm("anthropic", model="claude-3.5-sonnet")
566
+ response = llm.generate(analysis_prompt, media=documents)
567
+
568
+ # Automatic format handling:
569
+ # - PDF: Advanced text extraction with PyMuPDF4LLM
570
+ # - Excel: Table parsing with pandas
571
+ # - PowerPoint: Slide content extraction with unstructured
572
+ ```
573
+
574
+ ### 4. Local Development, Cloud Production
405
575
 
406
576
  ```python
407
577
  # Development (free, local)
@@ -411,7 +581,7 @@ llm_dev = create_llm("ollama", model="qwen3:4b-instruct-2507-q4_K_M")
411
581
  llm_prod = create_llm("openai", model="gpt-4o-mini")
412
582
  ```
413
583
 
414
- ### 3. Embeddings & RAG
584
+ ### 5. Embeddings & RAG
415
585
 
416
586
  ```python
417
587
  from abstractcore.embeddings import EmbeddingManager
@@ -431,7 +601,7 @@ similarity = embedder.compute_similarity(query, docs[0])
431
601
 
432
602
  [Learn more about Embeddings](docs/embeddings.md)
433
603
 
434
- ### 4. Structured Output
604
+ ### 6. Structured Output
435
605
 
436
606
  ```python
437
607
  from pydantic import BaseModel
@@ -449,7 +619,7 @@ review = llm.generate(
449
619
  print(f"{review.title}: {review.rating}/5")
450
620
  ```
451
621
 
452
- ### 5. Universal API Server
622
+ ### 7. Universal API Server
453
623
 
454
624
  ```bash
455
625
  # Start server once
@@ -466,14 +636,16 @@ curl -X POST http://localhost:8000/v1/chat/completions \
466
636
 
467
637
  ## Why AbstractCore?
468
638
 
469
- - **Unified Interface**: One API for all LLM providers
470
- - **Production Ready**: Robust error handling, retries, timeouts
471
- - **Type Safe**: Full Pydantic integration for structured outputs
472
- - **Local & Cloud**: Run models locally or use cloud APIs
473
- - **Tool Calling**: Consistent function calling across providers
474
- - **Streaming**: Real-time responses for interactive applications
475
- - **Embeddings**: Built-in vector embeddings for RAG
476
- - **Server Mode**: Optional OpenAI-compatible API server
639
+ - **Unified Interface**: One API for all LLM providers
640
+ - **Multimodal Support**: Upload images, PDFs, and documents across all providers
641
+ - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
642
+ - **Production Ready**: Robust error handling, retries, timeouts
643
+ - **Type Safe**: Full Pydantic integration for structured outputs
644
+ - **Local & Cloud**: Run models locally or use cloud APIs
645
+ - **Tool Calling**: Consistent function calling across providers
646
+ - **Streaming**: Real-time responses for interactive applications
647
+ - **Embeddings**: Built-in vector embeddings for RAG
648
+ - **Server Mode**: Optional OpenAI-compatible API server
477
649
  - **Well Documented**: Comprehensive guides and examples
478
650
 
479
651
  ## Installation Options
@@ -482,6 +654,9 @@ curl -X POST http://localhost:8000/v1/chat/completions \
482
654
  # Minimal core
483
655
  pip install abstractcore
484
656
 
657
+ # With media handling (images, PDFs, documents)
658
+ pip install abstractcore[media]
659
+
485
660
  # With specific providers
486
661
  pip install abstractcore[openai]
487
662
  pip install abstractcore[anthropic]
@@ -493,10 +668,25 @@ pip install abstractcore[server]
493
668
  # With embeddings
494
669
  pip install abstractcore[embeddings]
495
670
 
496
- # Everything
671
+ # Everything (recommended)
497
672
  pip install abstractcore[all]
498
673
  ```
499
674
 
675
+ **Media processing extras:**
676
+ ```bash
677
+ # For advanced PDF processing
678
+ pip install pymupdf4llm
679
+
680
+ # For Office documents (DOCX, XLSX, PPT)
681
+ pip install unstructured
682
+
683
+ # For image optimization
684
+ pip install pillow
685
+
686
+ # For data processing (CSV, Excel)
687
+ pip install pandas
688
+ ```
689
+
500
690
  ## Testing Status
501
691
 
502
692
  All tests passing as of October 12th, 2025.
@@ -519,6 +709,10 @@ All tests passing as of October 12th, 2025.
519
709
  - **[🐛 Issues](https://github.com/lpalbou/AbstractCore/issues)** - Report bugs
520
710
  - **[💬 Discussions](https://github.com/lpalbou/AbstractCore/discussions)** - Get help
521
711
 
712
+ ## Contact
713
+ **Maintainer:** Laurent-Philippe Albou
714
+ 📧 Email: [contact@abstractcore.ai](mailto:contact@abstractcore.ai)
715
+
522
716
  ## Contributing
523
717
 
524
718
  We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
@@ -1,5 +1,10 @@
1
1
  # AbstractCore
2
2
 
3
+ [![PyPI version](https://img.shields.io/pypi/v/abstractcore.svg)](https://pypi.org/project/abstractcore/)
4
+ [![Python Version](https://img.shields.io/pypi/pyversions/abstractcore)](https://pypi.org/project/abstractcore/)
5
+ [![license](https://img.shields.io/github/license/lpalbou/abstractcore)](https://github.com/lpalbou/abstractcore/blob/main/LICENSE)
6
+ [![GitHub stars](https://img.shields.io/github/stars/lpalbou/abstractcore?style=social)](https://github.com/lpalbou/abstractcore/stargazers)
7
+
3
8
  A unified Python library for interaction with multiple Large Language Model (LLM) providers.
4
9
 
5
10
  **Write once, run everywhere.**
@@ -64,15 +69,68 @@ loaded_session = BasicSession.load('conversation.json', provider=llm)
64
69
 
65
70
  [Learn more about Session](docs/session.md)
66
71
 
72
+ ### Media Handling
73
+
74
+ AbstractCore provides **unified media handling** across all providers with **automatic maximum resolution optimization** for best results. Upload images, PDFs, and documents using the same simple API regardless of your provider.
75
+
76
+ ```python
77
+ from abstractcore import create_llm
78
+
79
+ # Vision analysis - works with any vision model
80
+ # Images automatically processed at maximum supported resolution
81
+ llm = create_llm("openai", model="gpt-4o")
82
+ response = llm.generate(
83
+ "What's in this image?",
84
+ media=["photo.jpg"] # Auto-resized to model's maximum capability
85
+ )
86
+
87
+ # Document analysis - works with any model
88
+ llm = create_llm("anthropic", model="claude-3.5-sonnet")
89
+ response = llm.generate(
90
+ "Summarize this research paper",
91
+ media=["research_paper.pdf"]
92
+ )
93
+
94
+ # Multiple files - mix images, PDFs, spreadsheets
95
+ response = llm.generate(
96
+ "Analyze these business documents",
97
+ media=["report.pdf", "chart.png", "data.xlsx"]
98
+ )
99
+
100
+ # Same code works with local models
101
+ llm = create_llm("ollama", model="qwen3-vl:8b")
102
+ response = llm.generate(
103
+ "Describe this screenshot",
104
+ media=["screenshot.png"] # Auto-optimized for qwen3-vl
105
+ )
106
+ ```
107
+
108
+ **Key Features:**
109
+ - **Smart Resolution**: Automatically uses maximum resolution supported by each model
110
+ - **Format Support**: PNG, JPEG, GIF, WEBP, BMP, TIFF images; PDF, TXT, MD, CSV, TSV, JSON documents
111
+ - **Office Documents**: DOCX, XLSX, PPT (with `pip install abstractcore[all]`)
112
+ - **Vision Optimization**: Model-specific image processing for best vision results
113
+
114
+ **Provider compatibility:**
115
+ - **High-resolution vision**: GPT-4o (up to 4096x4096), Claude 3.5 Sonnet (up to 1568x1568)
116
+ - **Local models**: qwen3-vl (up to 3584x3584), gemma3:4b, llama3.2-vision
117
+ - **All models**: Automatic text extraction for non-vision models
118
+
119
+ [Learn more about Media Handling](docs/media-handling-system.md)
120
+
67
121
  ## Key Features
68
122
 
69
123
  - **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
124
+ - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
125
+ - **Intelligent Media Handling**: Upload images, PDFs, and documents with automatic maximum resolution optimization
126
+ - **Vision Model Support**: Smart image processing at each model's maximum capability for best results
127
+ - **Document Processing**: Advanced PDF extraction (PyMuPDF4LLM), Office documents (DOCX/XLSX/PPT), CSV/TSV analysis
70
128
  - **Unified Tools**: Consistent tool calling across all providers
71
129
  - **Session Management**: Persistent conversations with metadata, analytics, and complete serialization
72
130
  - **Structured Responses**: Clean, predictable output formats with Pydantic
73
131
  - **Streaming Support**: Real-time token generation for interactive experiences
74
132
  - **Embeddings**: Built-in support for semantic search and RAG applications
75
- - **Universal Server**: Optional OpenAI-compatible API server
133
+ - **Universal Server**: Optional OpenAI-compatible API server with `/v1/responses` endpoint
76
134
 
77
135
  ## Supported Providers
78
136
 
@@ -110,12 +168,15 @@ response = client.chat.completions.create(
110
168
  ```
111
169
 
112
170
  **Server Features:**
113
- - OpenAI-compatible REST endpoints (`/v1/chat/completions`, `/v1/embeddings`, etc.)
171
+ - OpenAI-compatible REST endpoints (`/v1/chat/completions`, `/v1/embeddings`, `/v1/responses`)
172
+ - **NEW in v2.4.3**: OpenAI Responses API (`/v1/responses`) with native `input_file` support
114
173
  - Multi-provider support through one HTTP API
174
+ - Comprehensive media processing (images, PDFs, Office documents, CSV/TSV)
115
175
  - Agentic CLI integration (Codex, Crush, Gemini CLI)
116
- - Streaming responses
176
+ - Streaming responses (opt-in per request)
117
177
  - Tool call format conversion
118
- - Interactive API docs at `/docs`
178
+ - Enhanced debug logging with `--debug` flag
179
+ - Interactive API docs at `/docs` (Swagger UI)
119
180
 
120
181
  **When to use the server:**
121
182
  - Integrating with existing OpenAI-compatible tools
@@ -276,6 +337,72 @@ Each application has comprehensive documentation with examples and advanced usag
276
337
  - Integration with shell scripts and automation
277
338
  - Standardized text processing tasks
278
339
 
340
+ ## Configuration
341
+
342
+ AbstractCore provides a **centralized configuration system** that manages default models, cache directories, and logging settings from a single location. This eliminates the need to specify `--provider` and `--model` parameters repeatedly.
343
+
344
+ ### Quick Setup
345
+
346
+ ```bash
347
+ # Check current configuration (shows how to change each setting)
348
+ abstractcore --status
349
+
350
+ # Set defaults for all applications
351
+ abstractcore --set-global-default ollama/llama3:8b
352
+
353
+ # Or configure specific applications (examples of customization)
354
+ abstractcore --set-app-default summarizer openai gpt-4o-mini
355
+ abstractcore --set-app-default extractor ollama qwen3:4b-instruct
356
+ abstractcore --set-app-default judge anthropic claude-3-5-haiku
357
+
358
+ # Configure logging (common examples)
359
+ abstractcore --set-console-log-level WARNING # Reduce console output
360
+ abstractcore --set-console-log-level NONE # Disable console logging
361
+ abstractcore --enable-file-logging # Save logs to files
362
+ abstractcore --enable-debug-logging # Full debug mode
363
+
364
+ # Set API keys as needed
365
+ abstractcore --set-api-key openai sk-your-key-here
366
+ abstractcore --set-api-key anthropic your-anthropic-key
367
+
368
+ # Verify configuration (includes change commands for each setting)
369
+ abstractcore --status
370
+ ```
371
+
372
+ ### Priority System
373
+
374
+ AbstractCore uses a clear priority system where explicit parameters always override defaults:
375
+
376
+ 1. **Explicit parameters** (highest priority): `summarizer doc.txt --provider openai --model gpt-4o-mini`
377
+ 2. **App-specific config**: `abstractcore --set-app-default summarizer openai gpt-4o-mini`
378
+ 3. **Global config**: `abstractcore --set-global-default openai/gpt-4o-mini`
379
+ 4. **Built-in defaults** (lowest priority): `huggingface/unsloth/Qwen3-4B-Instruct-2507-GGUF`
380
+
381
+ ### Usage After Configuration
382
+
383
+ Once configured, apps use your defaults automatically:
384
+
385
+ ```bash
386
+ # Before configuration (requires explicit parameters)
387
+ summarizer document.pdf --provider openai --model gpt-4o-mini
388
+
389
+ # After configuration (uses configured defaults)
390
+ summarizer document.pdf
391
+
392
+ # Explicit parameters still override when needed
393
+ summarizer document.pdf --provider anthropic --model claude-3-5-sonnet
394
+ ```
395
+
396
+ ### Configuration Features
397
+
398
+ - **Application defaults**: Different optimal models for each app
399
+ - **Cache directories**: Configurable cache locations for models and data
400
+ - **Logging control**: Package-wide logging levels and debug mode
401
+ - **API key management**: Centralized API key storage
402
+ - **Interactive setup**: `abstractcore --configure` for guided configuration
403
+
404
+ **Complete guide**: [Centralized Configuration](docs/centralized-config.md)
405
+
279
406
  ## Documentation
280
407
 
281
408
  **📚 Complete Documentation:** [docs/](docs/) - Full documentation index and navigation guide
@@ -287,6 +414,7 @@ Each application has comprehensive documentation with examples and advanced usag
287
414
 
288
415
  ### Core Library (Python)
289
416
  - **[Python API Reference](docs/api-reference.md)** - Complete Python API documentation
417
+ - **[Media Handling System](docs/media-handling-system.md)** - Images, PDFs, and document processing across all providers
290
418
  - **[Session Management](docs/session.md)** - Persistent conversations, serialization, and analytics
291
419
  - **[Embeddings Guide](docs/embeddings.md)** - Semantic search, RAG, and vector embeddings
292
420
  - **[Code Examples](examples/)** - Working examples for all features
@@ -312,7 +440,44 @@ for provider in providers:
312
440
  response = llm.generate("Hello!")
313
441
  ```
314
442
 
315
- ### 2. Local Development, Cloud Production
443
+ ### 2. Vision Analysis Across Providers
444
+
445
+ ```python
446
+ # Same image analysis works with any vision model
447
+ image_files = ["product_photo.jpg", "user_feedback.png"]
448
+ prompt = "Analyze these product images and suggest improvements"
449
+
450
+ # OpenAI GPT-4o
451
+ openai_llm = create_llm("openai", model="gpt-4o")
452
+ openai_analysis = openai_llm.generate(prompt, media=image_files)
453
+
454
+ # Anthropic Claude
455
+ claude_llm = create_llm("anthropic", model="claude-3.5-sonnet")
456
+ claude_analysis = claude_llm.generate(prompt, media=image_files)
457
+
458
+ # Local model (free)
459
+ local_llm = create_llm("ollama", model="qwen3-vl:8b")
460
+ local_analysis = local_llm.generate(prompt, media=image_files)
461
+ ```
462
+
463
+ ### 3. Document Processing Pipeline
464
+
465
+ ```python
466
+ # Universal document analysis
467
+ documents = ["contract.pdf", "financial_data.xlsx", "presentation.ppt"]
468
+ analysis_prompt = "Extract key information and identify potential risks"
469
+
470
+ # Works with any provider
471
+ llm = create_llm("anthropic", model="claude-3.5-sonnet")
472
+ response = llm.generate(analysis_prompt, media=documents)
473
+
474
+ # Automatic format handling:
475
+ # - PDF: Advanced text extraction with PyMuPDF4LLM
476
+ # - Excel: Table parsing with pandas
477
+ # - PowerPoint: Slide content extraction with unstructured
478
+ ```
479
+
480
+ ### 4. Local Development, Cloud Production
316
481
 
317
482
  ```python
318
483
  # Development (free, local)
@@ -322,7 +487,7 @@ llm_dev = create_llm("ollama", model="qwen3:4b-instruct-2507-q4_K_M")
322
487
  llm_prod = create_llm("openai", model="gpt-4o-mini")
323
488
  ```
324
489
 
325
- ### 3. Embeddings & RAG
490
+ ### 5. Embeddings & RAG
326
491
 
327
492
  ```python
328
493
  from abstractcore.embeddings import EmbeddingManager
@@ -342,7 +507,7 @@ similarity = embedder.compute_similarity(query, docs[0])
342
507
 
343
508
  [Learn more about Embeddings](docs/embeddings.md)
344
509
 
345
- ### 4. Structured Output
510
+ ### 6. Structured Output
346
511
 
347
512
  ```python
348
513
  from pydantic import BaseModel
@@ -360,7 +525,7 @@ review = llm.generate(
360
525
  print(f"{review.title}: {review.rating}/5")
361
526
  ```
362
527
 
363
- ### 5. Universal API Server
528
+ ### 7. Universal API Server
364
529
 
365
530
  ```bash
366
531
  # Start server once
@@ -377,14 +542,16 @@ curl -X POST http://localhost:8000/v1/chat/completions \
377
542
 
378
543
  ## Why AbstractCore?
379
544
 
380
- - **Unified Interface**: One API for all LLM providers
381
- - **Production Ready**: Robust error handling, retries, timeouts
382
- - **Type Safe**: Full Pydantic integration for structured outputs
383
- - **Local & Cloud**: Run models locally or use cloud APIs
384
- - **Tool Calling**: Consistent function calling across providers
385
- - **Streaming**: Real-time responses for interactive applications
386
- - **Embeddings**: Built-in vector embeddings for RAG
387
- - **Server Mode**: Optional OpenAI-compatible API server
545
+ - **Unified Interface**: One API for all LLM providers
546
+ - **Multimodal Support**: Upload images, PDFs, and documents across all providers
547
+ - **Vision Models**: Seamless integration with GPT-4o, Claude Vision, qwen3-vl, and more
548
+ - **Production Ready**: Robust error handling, retries, timeouts
549
+ - **Type Safe**: Full Pydantic integration for structured outputs
550
+ - **Local & Cloud**: Run models locally or use cloud APIs
551
+ - **Tool Calling**: Consistent function calling across providers
552
+ - **Streaming**: Real-time responses for interactive applications
553
+ - **Embeddings**: Built-in vector embeddings for RAG
554
+ - **Server Mode**: Optional OpenAI-compatible API server
388
555
  - **Well Documented**: Comprehensive guides and examples
389
556
 
390
557
  ## Installation Options
@@ -393,6 +560,9 @@ curl -X POST http://localhost:8000/v1/chat/completions \
393
560
  # Minimal core
394
561
  pip install abstractcore
395
562
 
563
+ # With media handling (images, PDFs, documents)
564
+ pip install abstractcore[media]
565
+
396
566
  # With specific providers
397
567
  pip install abstractcore[openai]
398
568
  pip install abstractcore[anthropic]
@@ -404,10 +574,25 @@ pip install abstractcore[server]
404
574
  # With embeddings
405
575
  pip install abstractcore[embeddings]
406
576
 
407
- # Everything
577
+ # Everything (recommended)
408
578
  pip install abstractcore[all]
409
579
  ```
410
580
 
581
+ **Media processing extras:**
582
+ ```bash
583
+ # For advanced PDF processing
584
+ pip install pymupdf4llm
585
+
586
+ # For Office documents (DOCX, XLSX, PPT)
587
+ pip install unstructured
588
+
589
+ # For image optimization
590
+ pip install pillow
591
+
592
+ # For data processing (CSV, Excel)
593
+ pip install pandas
594
+ ```
595
+
411
596
  ## Testing Status
412
597
 
413
598
  All tests passing as of October 12th, 2025.
@@ -430,6 +615,10 @@ All tests passing as of October 12th, 2025.
430
615
  - **[🐛 Issues](https://github.com/lpalbou/AbstractCore/issues)** - Report bugs
431
616
  - **[💬 Discussions](https://github.com/lpalbou/AbstractCore/discussions)** - Get help
432
617
 
618
+ ## Contact
619
+ **Maintainer:** Laurent-Philippe Albou
620
+ 📧 Email: [contact@abstractcore.ai](mailto:contact@abstractcore.ai)
621
+
433
622
  ## Contributing
434
623
 
435
624
  We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
@@ -444,4 +633,4 @@ MIT License - see [LICENSE](LICENSE) file for details.
444
633
 
445
634
  ---
446
635
 
447
- > **Migration Note**: This project was previously known as "AbstractLLM" and has been completely rebranded to "AbstractCore" as of version 2.4.0. See [CHANGELOG.md](CHANGELOG.md) for migration details.
636
+ > **Migration Note**: This project was previously known as "AbstractLLM" and has been completely rebranded to "AbstractCore" as of version 2.4.0. See [CHANGELOG.md](CHANGELOG.md) for migration details.