abstractcore: abstractcore-2.9.1-py3-none-any.whl → abstractcore-2.11.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. abstractcore/__init__.py +7 -27
  2. abstractcore/apps/extractor.py +33 -100
  3. abstractcore/apps/intent.py +19 -0
  4. abstractcore/apps/judge.py +20 -1
  5. abstractcore/apps/summarizer.py +20 -1
  6. abstractcore/architectures/detection.py +34 -1
  7. abstractcore/architectures/response_postprocessing.py +313 -0
  8. abstractcore/assets/architecture_formats.json +38 -8
  9. abstractcore/assets/model_capabilities.json +781 -160
  10. abstractcore/compression/__init__.py +1 -2
  11. abstractcore/compression/glyph_processor.py +6 -4
  12. abstractcore/config/main.py +31 -19
  13. abstractcore/config/manager.py +389 -11
  14. abstractcore/config/vision_config.py +5 -5
  15. abstractcore/core/interface.py +151 -3
  16. abstractcore/core/session.py +16 -10
  17. abstractcore/download.py +1 -1
  18. abstractcore/embeddings/manager.py +20 -6
  19. abstractcore/endpoint/__init__.py +2 -0
  20. abstractcore/endpoint/app.py +458 -0
  21. abstractcore/mcp/client.py +3 -1
  22. abstractcore/media/__init__.py +52 -17
  23. abstractcore/media/auto_handler.py +42 -22
  24. abstractcore/media/base.py +44 -1
  25. abstractcore/media/capabilities.py +12 -33
  26. abstractcore/media/enrichment.py +105 -0
  27. abstractcore/media/handlers/anthropic_handler.py +19 -28
  28. abstractcore/media/handlers/local_handler.py +124 -70
  29. abstractcore/media/handlers/openai_handler.py +19 -31
  30. abstractcore/media/processors/__init__.py +4 -2
  31. abstractcore/media/processors/audio_processor.py +57 -0
  32. abstractcore/media/processors/office_processor.py +8 -3
  33. abstractcore/media/processors/pdf_processor.py +46 -3
  34. abstractcore/media/processors/text_processor.py +22 -24
  35. abstractcore/media/processors/video_processor.py +58 -0
  36. abstractcore/media/types.py +97 -4
  37. abstractcore/media/utils/image_scaler.py +20 -2
  38. abstractcore/media/utils/video_frames.py +219 -0
  39. abstractcore/media/vision_fallback.py +136 -22
  40. abstractcore/processing/__init__.py +32 -3
  41. abstractcore/processing/basic_deepsearch.py +15 -10
  42. abstractcore/processing/basic_intent.py +3 -2
  43. abstractcore/processing/basic_judge.py +3 -2
  44. abstractcore/processing/basic_summarizer.py +1 -1
  45. abstractcore/providers/__init__.py +3 -1
  46. abstractcore/providers/anthropic_provider.py +95 -8
  47. abstractcore/providers/base.py +1516 -81
  48. abstractcore/providers/huggingface_provider.py +546 -69
  49. abstractcore/providers/lmstudio_provider.py +35 -923
  50. abstractcore/providers/mlx_provider.py +382 -35
  51. abstractcore/providers/model_capabilities.py +5 -1
  52. abstractcore/providers/ollama_provider.py +99 -15
  53. abstractcore/providers/openai_compatible_provider.py +406 -180
  54. abstractcore/providers/openai_provider.py +188 -44
  55. abstractcore/providers/openrouter_provider.py +76 -0
  56. abstractcore/providers/registry.py +61 -5
  57. abstractcore/providers/streaming.py +138 -33
  58. abstractcore/providers/vllm_provider.py +92 -817
  59. abstractcore/server/app.py +461 -13
  60. abstractcore/server/audio_endpoints.py +139 -0
  61. abstractcore/server/vision_endpoints.py +1319 -0
  62. abstractcore/structured/handler.py +316 -41
  63. abstractcore/tools/common_tools.py +5501 -2012
  64. abstractcore/tools/comms_tools.py +1641 -0
  65. abstractcore/tools/core.py +37 -7
  66. abstractcore/tools/handler.py +4 -9
  67. abstractcore/tools/parser.py +49 -2
  68. abstractcore/tools/tag_rewriter.py +2 -1
  69. abstractcore/tools/telegram_tdlib.py +407 -0
  70. abstractcore/tools/telegram_tools.py +261 -0
  71. abstractcore/utils/cli.py +1085 -72
  72. abstractcore/utils/token_utils.py +2 -0
  73. abstractcore/utils/truncation.py +29 -0
  74. abstractcore/utils/version.py +3 -4
  75. abstractcore/utils/vlm_token_calculator.py +12 -2
  76. abstractcore-2.11.2.dist-info/METADATA +562 -0
  77. abstractcore-2.11.2.dist-info/RECORD +133 -0
  78. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/WHEEL +1 -1
  79. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/entry_points.txt +1 -0
  80. abstractcore-2.9.1.dist-info/METADATA +0 -1190
  81. abstractcore-2.9.1.dist-info/RECORD +0 -119
  82. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/licenses/LICENSE +0 -0
  83. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/top_level.txt +0 -0
@@ -230,12 +230,14 @@ class TokenUtils:
230
230
  return ContentType.NATURAL_LANGUAGE
231
231
 
232
232
  # Sample first 1000 chars for efficiency
233
+ #[WARNING:TRUNCATION] bounded sample for heuristic detection (performance)
233
234
  sample = text[:1000]
234
235
 
235
236
  # JSON detection
236
237
  if sample.strip().startswith(('{', '[')):
237
238
  try:
238
239
  import json
240
+ #[WARNING:TRUNCATION] bounded JSON probe for heuristic detection (performance)
239
241
  json.loads(sample[:500]) # Try to parse a portion
240
242
  return ContentType.JSON
241
243
  except:
@@ -0,0 +1,29 @@
1
+ """Truncation utilities (explicit + searchable).
2
+
3
+ Policy authority: ADR-0026 (docs/adr/0026-truncation-policy-and-contract.md).
4
+
5
+ All lossy truncation must:
6
+ - be explicit in the returned text (marker),
7
+ - and be searchable in code via `#[WARNING:TRUNCATION]`.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ from typing import Any
13
+
14
+
15
+ def preview_text(value: Any, *, max_chars: int, marker: str = "… (truncated)") -> str:
16
+ """Return `value` as a bounded preview with an explicit truncation marker."""
17
+ s = str(value or "")
18
+ if max_chars <= 0:
19
+ #[WARNING:TRUNCATION] bounded preview requested with max_chars<=0
20
+ return ""
21
+ max_chars_i = int(max_chars)
22
+ if len(s) <= max_chars_i:
23
+ return s
24
+ #[WARNING:TRUNCATION] bounded preview (logs/telemetry/UI)
25
+ keep = max(0, max_chars_i - len(marker))
26
+ if keep <= 0:
27
+ return marker[:max_chars_i].rstrip()
28
+ return s[:keep].rstrip() + marker
29
+
@@ -2,13 +2,12 @@
2
2
  Version management for AbstractCore.
3
3
 
4
4
  This module provides the package version as a static constant that serves as the
5
- single source of truth for the Python code. The version is also maintained in
6
- pyproject.toml for packaging, requiring manual synchronization during releases.
5
+ single source of truth. Packaging reads the version from this module via
6
+ `[tool.setuptools.dynamic]` in `pyproject.toml`.
7
7
 
8
8
  This approach ensures reliable version access in all deployment scenarios,
9
9
  including when the package is installed from PyPI where pyproject.toml is not available.
10
10
  """
11
11
 
12
12
  # Package version - update this when releasing new versions
13
- # This must be manually synchronized with the version in pyproject.toml
14
- __version__ = "2.9.1"
13
+ __version__ = "2.11.2"
@@ -24,7 +24,12 @@ from typing import Tuple, Dict, Any, Optional, List
24
24
  from pathlib import Path
25
25
  import logging
26
26
 
27
- from PIL import Image
27
+ try:
28
+ from PIL import Image
29
+ PIL_AVAILABLE = True
30
+ except ImportError: # pragma: no cover
31
+ Image = None
32
+ PIL_AVAILABLE = False
28
33
 
29
34
  from ..utils.structured_logging import get_logger
30
35
  from ..architectures.detection import get_model_capabilities, detect_architecture
@@ -143,6 +148,11 @@ class VLMTokenCalculator:
143
148
  """
144
149
  # Get image dimensions
145
150
  if image_path and image_path.exists():
151
+ if not PIL_AVAILABLE:
152
+ raise ImportError(
153
+ "PIL/Pillow is required to read image files for token calculation. "
154
+ "Install with: pip install \"abstractcore[media]\""
155
+ )
146
156
  try:
147
157
  with Image.open(image_path) as img:
148
158
  width, height = img.size
@@ -653,4 +663,4 @@ def calculate_glyph_compression_ratio(original_tokens: int,
653
663
  model: str = '') -> Dict[str, Any]:
654
664
  """Calculate accurate Glyph compression ratio."""
655
665
  calculator = VLMTokenCalculator()
656
- return calculator.get_compression_ratio(original_tokens, image_paths, provider, model)
666
+ return calculator.get_compression_ratio(original_tokens, image_paths, provider, model)
@@ -0,0 +1,562 @@
1
+ Metadata-Version: 2.4
2
+ Name: abstractcore
3
+ Version: 2.11.2
4
+ Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
5
+ Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
6
+ Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
7
+ License: MIT
8
+ Project-URL: Homepage, https://lpalbou.github.io/AbstractCore
9
+ Project-URL: Documentation, https://github.com/lpalbou/AbstractCore#readme
10
+ Project-URL: Repository, https://github.com/lpalbou/AbstractCore
11
+ Project-URL: Bug Tracker, https://github.com/lpalbou/AbstractCore/issues
12
+ Project-URL: Changelog, https://github.com/lpalbou/AbstractCore/blob/main/CHANGELOG.md
13
+ Keywords: llm,openai,anthropic,ollama,lmstudio,huggingface,mlx,ai,machine-learning,natural-language-processing,tool-calling,streaming
14
+ Classifier: Development Status :: 4 - Beta
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Operating System :: OS Independent
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
24
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
25
+ Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers
26
+ Requires-Python: >=3.9
27
+ Description-Content-Type: text/markdown
28
+ License-File: LICENSE
29
+ Requires-Dist: pydantic<3.0.0,>=2.0.0
30
+ Requires-Dist: httpx<1.0.0,>=0.24.0
31
+ Provides-Extra: openai
32
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "openai"
33
+ Provides-Extra: anthropic
34
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "anthropic"
35
+ Provides-Extra: ollama
36
+ Provides-Extra: lmstudio
37
+ Provides-Extra: huggingface
38
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "huggingface"
39
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "huggingface"
40
+ Requires-Dist: torchvision>=0.17.0; extra == "huggingface"
41
+ Requires-Dist: torchaudio>=2.1.0; extra == "huggingface"
42
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "huggingface"
43
+ Requires-Dist: outlines>=0.1.0; extra == "huggingface"
44
+ Provides-Extra: mlx
45
+ Requires-Dist: mlx<1.0.0,>=0.30.0; extra == "mlx"
46
+ Requires-Dist: mlx-lm<1.0.0,>=0.30.0; extra == "mlx"
47
+ Requires-Dist: outlines>=0.1.0; extra == "mlx"
48
+ Provides-Extra: mlx-bench
49
+ Requires-Dist: matplotlib<4.0.0,>=3.8.0; extra == "mlx-bench"
50
+ Provides-Extra: vllm
51
+ Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "vllm"
52
+ Provides-Extra: embeddings
53
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "embeddings"
54
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
55
+ Provides-Extra: tokens
56
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "tokens"
57
+ Provides-Extra: tools
58
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "tools"
59
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "tools"
60
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "tools"
61
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "tools"
62
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "tools"
63
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "tools"
64
+ Provides-Extra: tool
65
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "tool"
66
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "tool"
67
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "tool"
68
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "tool"
69
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "tool"
70
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "tool"
71
+ Provides-Extra: media
72
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "media"
73
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "media"
74
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "media"
75
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "media"
76
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "media"
77
+ Provides-Extra: compression
78
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "compression"
79
+ Provides-Extra: all
80
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "all"
81
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "all"
82
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "all"
83
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "all"
84
+ Requires-Dist: torchvision>=0.17.0; extra == "all"
85
+ Requires-Dist: torchaudio>=2.1.0; extra == "all"
86
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "all"
87
+ Requires-Dist: outlines>=0.1.0; extra == "all"
88
+ Requires-Dist: mlx<1.0.0,>=0.30.0; extra == "all"
89
+ Requires-Dist: mlx-lm<1.0.0,>=0.30.0; extra == "all"
90
+ Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "all"
91
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "all"
92
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "all"
93
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "all"
94
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "all"
95
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "all"
96
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "all"
97
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "all"
98
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "all"
99
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "all"
100
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "all"
101
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "all"
102
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "all"
103
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "all"
104
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "all"
105
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "all"
106
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "all"
107
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "all"
108
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "all"
109
+ Requires-Dist: abstractvision>=0.2.0; extra == "all"
110
+ Provides-Extra: all-apple
111
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "all-apple"
112
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "all-apple"
113
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "all-apple"
114
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "all-apple"
115
+ Requires-Dist: torchvision>=0.17.0; extra == "all-apple"
116
+ Requires-Dist: torchaudio>=2.1.0; extra == "all-apple"
117
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "all-apple"
118
+ Requires-Dist: outlines>=0.1.0; extra == "all-apple"
119
+ Requires-Dist: mlx<1.0.0,>=0.30.0; extra == "all-apple"
120
+ Requires-Dist: mlx-lm<1.0.0,>=0.30.0; extra == "all-apple"
121
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "all-apple"
122
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "all-apple"
123
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "all-apple"
124
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "all-apple"
125
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "all-apple"
126
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "all-apple"
127
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "all-apple"
128
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "all-apple"
129
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "all-apple"
130
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "all-apple"
131
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "all-apple"
132
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "all-apple"
133
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "all-apple"
134
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "all-apple"
135
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "all-apple"
136
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "all-apple"
137
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "all-apple"
138
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "all-apple"
139
+ Requires-Dist: abstractvision>=0.2.0; extra == "all-apple"
140
+ Provides-Extra: all-gpu
141
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "all-gpu"
142
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "all-gpu"
143
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "all-gpu"
144
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "all-gpu"
145
+ Requires-Dist: torchvision>=0.17.0; extra == "all-gpu"
146
+ Requires-Dist: torchaudio>=2.1.0; extra == "all-gpu"
147
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "all-gpu"
148
+ Requires-Dist: outlines>=0.1.0; extra == "all-gpu"
149
+ Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "all-gpu"
150
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "all-gpu"
151
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "all-gpu"
152
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "all-gpu"
153
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "all-gpu"
154
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "all-gpu"
155
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "all-gpu"
156
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "all-gpu"
157
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "all-gpu"
158
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "all-gpu"
159
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "all-gpu"
160
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "all-gpu"
161
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "all-gpu"
162
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "all-gpu"
163
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "all-gpu"
164
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "all-gpu"
165
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "all-gpu"
166
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "all-gpu"
167
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "all-gpu"
168
+ Requires-Dist: abstractvision>=0.2.0; extra == "all-gpu"
169
+ Provides-Extra: all-non-mlx
170
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "all-non-mlx"
171
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "all-non-mlx"
172
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "all-non-mlx"
173
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "all-non-mlx"
174
+ Requires-Dist: torchvision>=0.17.0; extra == "all-non-mlx"
175
+ Requires-Dist: torchaudio>=2.1.0; extra == "all-non-mlx"
176
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "all-non-mlx"
177
+ Requires-Dist: outlines>=0.1.0; extra == "all-non-mlx"
178
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "all-non-mlx"
179
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "all-non-mlx"
180
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "all-non-mlx"
181
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "all-non-mlx"
182
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "all-non-mlx"
183
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "all-non-mlx"
184
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "all-non-mlx"
185
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "all-non-mlx"
186
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "all-non-mlx"
187
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "all-non-mlx"
188
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "all-non-mlx"
189
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "all-non-mlx"
190
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "all-non-mlx"
191
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "all-non-mlx"
192
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "all-non-mlx"
193
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "all-non-mlx"
194
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "all-non-mlx"
195
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "all-non-mlx"
196
+ Requires-Dist: abstractvision>=0.2.0; extra == "all-non-mlx"
197
+ Provides-Extra: dev
198
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
199
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
200
+ Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
201
+ Requires-Dist: black>=23.0.0; extra == "dev"
202
+ Requires-Dist: isort>=5.12.0; extra == "dev"
203
+ Requires-Dist: mypy>=1.5.0; extra == "dev"
204
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
205
+ Requires-Dist: pre-commit>=3.0.0; extra == "dev"
206
+ Provides-Extra: server
207
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "server"
208
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "server"
209
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "server"
210
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "server"
211
+ Requires-Dist: abstractvision>=0.2.0; extra == "server"
212
+ Provides-Extra: vision
213
+ Requires-Dist: abstractvision>=0.2.0; extra == "vision"
214
+ Provides-Extra: vision-diffusers
215
+ Requires-Dist: abstractvision[huggingface]>=0.2.0; extra == "vision-diffusers"
216
+ Provides-Extra: vision-sdcpp
217
+ Requires-Dist: abstractvision[sdcpp]>=0.2.0; extra == "vision-sdcpp"
218
+ Provides-Extra: vision-local
219
+ Requires-Dist: abstractvision[local]>=0.2.0; extra == "vision-local"
220
+ Provides-Extra: test
221
+ Requires-Dist: pytest>=7.0.0; extra == "test"
222
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
223
+ Requires-Dist: pytest-mock>=3.10.0; extra == "test"
224
+ Requires-Dist: pytest-cov>=4.0.0; extra == "test"
225
+ Requires-Dist: responses>=0.23.0; extra == "test"
226
+ Requires-Dist: httpx>=0.24.0; extra == "test"
227
+ Provides-Extra: docs
228
+ Requires-Dist: mkdocs>=1.5.0; extra == "docs"
229
+ Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
230
+ Requires-Dist: mkdocstrings[python]>=0.22.0; extra == "docs"
231
+ Requires-Dist: mkdocs-autorefs>=0.4.0; extra == "docs"
232
+ Provides-Extra: full-dev
233
+ Requires-Dist: openai<2.0.0,>=1.0.0; extra == "full-dev"
234
+ Requires-Dist: anthropic<1.0.0,>=0.25.0; extra == "full-dev"
235
+ Requires-Dist: transformers<6.0.0,>=4.57.1; extra == "full-dev"
236
+ Requires-Dist: torch<3.0.0,>=2.6.0; extra == "full-dev"
237
+ Requires-Dist: torchvision>=0.17.0; extra == "full-dev"
238
+ Requires-Dist: torchaudio>=2.1.0; extra == "full-dev"
239
+ Requires-Dist: llama-cpp-python<1.0.0,>=0.2.0; extra == "full-dev"
240
+ Requires-Dist: outlines>=0.1.0; extra == "full-dev"
241
+ Requires-Dist: mlx<1.0.0,>=0.30.0; extra == "full-dev"
242
+ Requires-Dist: mlx-lm<1.0.0,>=0.30.0; extra == "full-dev"
243
+ Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "full-dev"
244
+ Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "full-dev"
245
+ Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "full-dev"
246
+ Requires-Dist: tiktoken<1.0.0,>=0.5.0; extra == "full-dev"
247
+ Requires-Dist: requests<3.0.0,>=2.25.0; extra == "full-dev"
248
+ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.0; extra == "full-dev"
249
+ Requires-Dist: lxml<6.0.0,>=4.9.0; extra == "full-dev"
250
+ Requires-Dist: ddgs<10.0.0,>=9.10.0; python_version >= "3.10" and extra == "full-dev"
251
+ Requires-Dist: duckduckgo-search<4.0.0,>=3.8.0; python_version < "3.10" and extra == "full-dev"
252
+ Requires-Dist: psutil<6.0.0,>=5.9.0; extra == "full-dev"
253
+ Requires-Dist: Pillow<12.0.0,>=10.0.0; extra == "full-dev"
254
+ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "full-dev"
255
+ Requires-Dist: pymupdf-layout<2.0.0,>=1.26.6; extra == "full-dev"
256
+ Requires-Dist: unstructured[docx,odt,pptx,rtf,xlsx]<1.0.0,>=0.10.0; extra == "full-dev"
257
+ Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "full-dev"
258
+ Requires-Dist: fastapi<1.0.0,>=0.100.0; extra == "full-dev"
259
+ Requires-Dist: uvicorn[standard]<1.0.0,>=0.23.0; extra == "full-dev"
260
+ Requires-Dist: python-multipart<1.0.0,>=0.0.6; extra == "full-dev"
261
+ Requires-Dist: sse-starlette<2.0.0,>=1.6.0; extra == "full-dev"
262
+ Requires-Dist: abstractvision>=0.2.0; extra == "full-dev"
263
+ Requires-Dist: pytest>=7.0.0; extra == "full-dev"
264
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "full-dev"
265
+ Requires-Dist: pytest-mock>=3.10.0; extra == "full-dev"
266
+ Requires-Dist: pytest-cov>=4.0.0; extra == "full-dev"
267
+ Requires-Dist: responses>=0.23.0; extra == "full-dev"
268
+ Requires-Dist: black>=23.0.0; extra == "full-dev"
269
+ Requires-Dist: isort>=5.12.0; extra == "full-dev"
270
+ Requires-Dist: mypy>=1.5.0; extra == "full-dev"
271
+ Requires-Dist: ruff>=0.1.0; extra == "full-dev"
272
+ Requires-Dist: pre-commit>=3.0.0; extra == "full-dev"
273
+ Requires-Dist: mkdocs>=1.5.0; extra == "full-dev"
274
+ Requires-Dist: mkdocs-material>=9.0.0; extra == "full-dev"
275
+ Requires-Dist: mkdocstrings[python]>=0.22.0; extra == "full-dev"
276
+ Requires-Dist: mkdocs-autorefs>=0.4.0; extra == "full-dev"
277
+ Dynamic: license-file
278
+
279
+ # AbstractCore
280
+
281
+ [![PyPI version](https://img.shields.io/pypi/v/abstractcore.svg)](https://pypi.org/project/abstractcore/)
282
+ [![Python Version](https://img.shields.io/pypi/pyversions/abstractcore)](https://pypi.org/project/abstractcore/)
283
+ [![license](https://img.shields.io/github/license/lpalbou/AbstractCore)](https://github.com/lpalbou/AbstractCore/blob/main/LICENSE)
284
+ [![GitHub stars](https://img.shields.io/github/stars/lpalbou/AbstractCore?style=social)](https://github.com/lpalbou/AbstractCore/stargazers)
285
+
286
+ Unified LLM Interface
287
+ > Write once, run everywhere
288
+
289
+ AbstractCore is a Python library that provides a unified `create_llm(...)` API across cloud + local LLM providers (OpenAI, Anthropic, Ollama, LMStudio, and more). The default install is intentionally lightweight; add providers and optional subsystems via explicit install extras.
290
+
291
+ First-class support for:
292
+ - sync + async
293
+ - streaming + non-streaming
294
+ - universal tool calling (native + prompted tool syntax)
295
+ - structured output (Pydantic)
296
+ - media input (images/audio/video + documents) with explicit, policy-driven fallbacks (*)
297
+ - optional capability plugins (`core.voice/core.audio/core.vision`) for deterministic TTS/STT and generative vision (via `abstractvoice` / `abstractvision`)
298
+ - glyph visual-text compression for long documents (**)
299
+ - unified openai-compatible endpoint for all providers and models
300
+
301
+ (*) Media input is policy-driven (no silent semantic changes). If a model doesn’t support images, AbstractCore can use a configured vision model to generate short visual observations and inject them into your text-only request (vision fallback). Audio/video attachments are also policy-driven (`audio_policy`, `video_policy`) and may require capability plugins for fallbacks. See [Media Handling](docs/media-handling-system.md) and [Centralized Config](docs/centralized-config.md).
302
+ (**) Optional visual-text compression: render long text/PDFs into images and process them with a vision model to reduce token usage. See [Glyph Visual-Text Compression](docs/glyphs.md) (install `pip install "abstractcore[compression]"`; for PDFs also install `pip install "abstractcore[media]"`).
303
+
304
+ Docs: [Getting Started](docs/getting-started.md) · [FAQ](docs/faq.md) · [Docs Index](docs/README.md) · https://lpalbou.github.io/AbstractCore
305
+
306
+ ## Install
307
+
308
+ ```bash
309
+ # Core (small, lightweight default)
310
+ pip install abstractcore
311
+
312
+ # Providers
313
+ pip install "abstractcore[openai]" # OpenAI SDK
314
+ pip install "abstractcore[anthropic]" # Anthropic SDK
315
+ pip install "abstractcore[huggingface]" # Transformers / torch (heavy)
316
+ pip install "abstractcore[mlx]" # Apple Silicon local inference (heavy)
317
+ pip install "abstractcore[vllm]" # NVIDIA CUDA / ROCm (heavy)
318
+
319
+ # Optional features
320
+ pip install "abstractcore[tools]" # built-in web tools (web_search, skim_websearch, skim_url, fetch_url)
321
+ pip install "abstractcore[media]" # images, PDFs, Office docs
322
+ pip install "abstractcore[compression]" # glyph visual-text compression (Pillow-only)
323
+ pip install "abstractcore[embeddings]" # EmbeddingManager + local embedding models
324
+ pip install "abstractcore[tokens]" # precise token counting (tiktoken)
325
+ pip install "abstractcore[server]" # OpenAI-compatible HTTP gateway
326
+
327
+ # Combine extras (zsh: keep quotes)
328
+ pip install "abstractcore[openai,media,tools]"
329
+
330
+ # Turnkey "everything" installs (pick one)
331
+ pip install "abstractcore[all-apple]" # macOS/Apple Silicon (includes MLX, excludes vLLM)
332
+ pip install "abstractcore[all-non-mlx]" # Linux/Windows/Intel Mac (excludes MLX and vLLM)
333
+ pip install "abstractcore[all-gpu]" # Linux NVIDIA GPU (includes vLLM, excludes MLX)
334
+ ```
335
+
336
+ ## Quickstart
337
+
338
+ OpenAI example (requires `pip install "abstractcore[openai]"`):
339
+
340
+ ```python
341
+ from abstractcore import create_llm
342
+
343
+ llm = create_llm("openai", model="gpt-4o-mini")
344
+ response = llm.generate("What is the capital of France?")
345
+ print(response.content)
346
+ ```
347
+
348
+ ### Conversation state (`BasicSession`)
349
+
350
+ ```python
351
+ from abstractcore import create_llm, BasicSession
352
+
353
+ session = BasicSession(create_llm("anthropic", model="claude-haiku-4-5"))
354
+ print(session.generate("Give me 3 bakery name ideas.").content)
355
+ print(session.generate("Pick the best one and explain why.").content)
356
+ ```
357
+
358
+ ### Streaming
359
+
360
+ ```python
361
+ from abstractcore import create_llm
362
+
363
+ llm = create_llm("ollama", model="qwen3:4b-instruct")
364
+ for chunk in llm.generate("Write a short poem about distributed systems.", stream=True):
365
+ print(chunk.content or "", end="", flush=True)
366
+ ```
367
+
368
+ ### Async
369
+
370
+ ```python
371
+ import asyncio
372
+ from abstractcore import create_llm
373
+
374
+ async def main():
375
+ llm = create_llm("openai", model="gpt-4o-mini")
376
+ resp = await llm.agenerate("Give me 5 bullet points about HTTP caching.")
377
+ print(resp.content)
378
+
379
+ asyncio.run(main())
380
+ ```
381
+
382
+ ## Token budgets (unified)
383
+
384
+ ```python
385
+ from abstractcore import create_llm
386
+
387
+ llm = create_llm(
388
+ "openai",
389
+ model="gpt-4o-mini",
390
+ max_tokens=8000, # total budget (input + output)
391
+ max_output_tokens=1200, # output cap
392
+ )
393
+ ```
394
+
395
+ ## Providers (common)
396
+
397
+ - `openai`: `OPENAI_API_KEY`, optional `OPENAI_BASE_URL`
398
+ - `anthropic`: `ANTHROPIC_API_KEY`, optional `ANTHROPIC_BASE_URL`
399
+ - `openrouter`: `OPENROUTER_API_KEY`, optional `OPENROUTER_BASE_URL` (default: `https://openrouter.ai/api/v1`)
400
+ - `ollama`: local server at `OLLAMA_BASE_URL` (or legacy `OLLAMA_HOST`)
401
+ - `lmstudio`: OpenAI-compatible local server at `LMSTUDIO_BASE_URL` (default: `http://localhost:1234/v1`)
402
+ - `vllm`: OpenAI-compatible server at `VLLM_BASE_URL` (default: `http://localhost:8000/v1`)
403
+ - `openai-compatible`: generic OpenAI-compatible endpoints via `OPENAI_COMPATIBLE_BASE_URL` (default: `http://localhost:1234/v1`)
404
+
405
+ You can also persist settings (including API keys) via the config CLI:
406
+ - `abstractcore --status`
407
+ - `abstractcore --configure`
408
+ - `abstractcore --set-api-key openai sk-...`
409
+
410
+ ## What’s inside (quick tour)
411
+
412
+ - Tools: universal tool calling across providers → [Tool Calling](docs/tool-calling.md)
413
+ - Built-in tools (optional): web + filesystem helpers (`skim_websearch`, `skim_url`, `fetch_url`, `read_file`, …) → [Tool Calling](docs/tool-calling.md)
414
+ - Tool syntax rewriting: `tool_call_tags` (Python) and `agent_format` (server) → [Tool Syntax Rewriting](docs/tool-syntax-rewriting.md)
415
+ - Structured output: Pydantic-first with provider-aware strategies → [Structured Output](docs/structured-output.md)
416
+ - Media input: images/audio/video + documents (policies + fallbacks) → [Media Handling](docs/media-handling-system.md) and [Vision Capabilities](docs/vision-capabilities.md)
417
+ - Capability plugins (optional): deterministic `llm.voice/llm.audio/llm.vision` surfaces → [Capabilities](docs/capabilities.md)
418
+ - Glyph visual-text compression: scale long-context document analysis via VLMs → [Glyph Visual-Text Compression](docs/glyphs.md)
419
+ - Embeddings and semantic search → [Embeddings](docs/embeddings.md)
420
+ - Observability: global event bus + interaction traces → [Architecture](docs/architecture.md), [API Reference (Events)](docs/api-reference.md#eventtype), [Interaction Tracing](docs/interaction-tracing.md)
421
+ - MCP (Model Context Protocol): discover tools from MCP servers (HTTP/stdio) → [MCP](docs/mcp.md)
422
+ - OpenAI-compatible server: one `/v1` gateway for chat + optional `/v1/images/*` and `/v1/audio/*` endpoints → [Server](docs/server.md)
423
+
424
+ ## Tool calling (passthrough by default)
425
+
426
+ By default (`execute_tools=False`), AbstractCore:
427
+ - returns clean assistant text in `response.content`
428
+ - returns structured tool calls in `response.tool_calls` (host/runtime executes them)
429
+
430
+ ```python
431
+ from abstractcore import create_llm, tool
432
+
433
+ @tool
434
+ def get_weather(city: str) -> str:
435
+ return f"{city}: 22°C and sunny"
436
+
437
+ llm = create_llm("openai", model="gpt-4o-mini")
438
+ resp = llm.generate("What's the weather in Paris? Use the tool.", tools=[get_weather])
439
+
440
+ print(resp.content)
441
+ print(resp.tool_calls)
442
+ ```
443
+
444
+ If you need tool-call markup preserved or rewritten in `content` for downstream parsers, pass
445
+ `tool_call_tags=...` (e.g. `"qwen3"`, `"llama3"`, `"xml"`). See [Tool Syntax Rewriting](docs/tool-syntax-rewriting.md).
446
+
447
+ ## Structured output
448
+
449
+ ```python
450
+ from pydantic import BaseModel
451
+ from abstractcore import create_llm
452
+
453
+ class Answer(BaseModel):
454
+ title: str
455
+ bullets: list[str]
456
+
457
+ llm = create_llm("openai", model="gpt-4o-mini")
458
+ answer = llm.generate("Summarize HTTP/3 in 3 bullets.", response_model=Answer)
459
+ print(answer.bullets)
460
+ ```
461
+
462
+ ## Media / vision input
463
+
464
+ Requires `pip install "abstractcore[media]"`.
465
+
466
+ ```python
467
+ from abstractcore import create_llm
468
+
469
+ llm = create_llm("anthropic", model="claude-haiku-4-5")
470
+ resp = llm.generate("Describe the image.", media=["./image.png"])
471
+ print(resp.content)
472
+ ```
473
+
474
+ Notes:
475
+ - Audio/video attachments are policy-driven (`audio_policy`, `video_policy`) and fail loudly by default unless you configure or request a fallback.
476
+ - Speech-to-text fallback for audio attachments typically requires installing `abstractvoice` (capability plugin).
477
+
478
+ See [Media Handling](docs/media-handling-system.md) and [Vision Capabilities](docs/vision-capabilities.md).
479
+
480
+ ## HTTP server (OpenAI-compatible gateway)
481
+
482
+ ```bash
483
+ pip install "abstractcore[server]"
484
+ python -m abstractcore.server.app
485
+ ```
486
+
487
+ Use any OpenAI-compatible client and route to any provider/model via `model="provider/model"`:
488
+
489
+ ```python
490
+ from openai import OpenAI
491
+
492
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")
493
+ resp = client.chat.completions.create(
494
+ model="ollama/qwen3:4b-instruct",
495
+ messages=[{"role": "user", "content": "Hello from the gateway!"}],
496
+ )
497
+ print(resp.choices[0].message.content)
498
+ ```
499
+
500
+ See [Server](docs/server.md).
501
+
502
+ ## CLI (optional)
503
+
504
+ Interactive chat:
505
+
506
+ ```bash
507
+ abstractcore-chat --provider openai --model gpt-4o-mini
508
+ abstractcore-chat --provider lmstudio --model qwen/qwen3-4b-2507 --base-url http://localhost:1234/v1
509
+ abstractcore-chat --provider openrouter --model openai/gpt-4o-mini
510
+ ```
511
+
512
+ Token limits:
513
+ - startup: `abstractcore-chat --max-tokens 8192 --max-output-tokens 1024 ...`
514
+ - in-REPL: `/max-tokens 8192` and `/max-output-tokens 1024`
515
+
516
+ ## Built-in CLI apps
517
+
518
+ AbstractCore also ships with ready-to-use CLI apps:
519
+ - `summarizer`, `extractor`, `judge`, `intent`, `deepsearch` (see [docs/apps/](docs/apps/))
520
+
521
+ ## Documentation map
522
+
523
+ Start here:
524
+ - [Docs Index](docs/README.md) — navigation for all docs
525
+ - [Prerequisites](docs/prerequisites.md) — provider setup (keys, local servers, hardware notes)
526
+ - [Getting Started](docs/getting-started.md) — first call + core concepts
527
+ - [FAQ](docs/faq.md) — common questions and setup gotchas
528
+ - [Examples](docs/examples.md) — end-to-end patterns and recipes
529
+ - [Troubleshooting](docs/troubleshooting.md) — common failures and fixes
530
+
531
+ Core features:
532
+ - [Tool Calling](docs/tool-calling.md) — universal tools across providers (native + prompted)
533
+ - [Tool Syntax Rewriting](docs/tool-syntax-rewriting.md) — rewrite tool-call syntax for different runtimes/clients
534
+ - [Structured Output](docs/structured-output.md) — schema enforcement + retry strategies
535
+ - [Media Handling](docs/media-handling-system.md) — images/audio/video + documents (policies + fallbacks)
536
+ - [Vision Capabilities](docs/vision-capabilities.md) — image/video input, vision fallback, and how this differs from generative vision
537
+ - [Glyph Visual-Text Compression](docs/glyphs.md) — compress long documents into images for VLMs
538
+ - [Generation Parameters](docs/generation-parameters.md) — unified parameter vocabulary and provider quirks
539
+ - [Session Management](docs/session.md) — conversation history, persistence, and compaction
540
+ - [Embeddings](docs/embeddings.md) — embeddings API and RAG building blocks
541
+ - [Async Guide](docs/async-guide.md) — async patterns, concurrency, best practices
542
+ - [Centralized Config](docs/centralized-config.md) — `~/.abstractcore/config/abstractcore.json` + CLI config commands
543
+ - [Capabilities](docs/capabilities.md) — supported features and current limitations
544
+ - [Interaction Tracing](docs/interaction-tracing.md) — inspect prompts/responses/usage for observability
545
+ - [MCP](docs/mcp.md) — consume MCP tool servers (HTTP/stdio) as tool sources
546
+
547
+ Reference and internals:
548
+ - [Architecture](docs/architecture.md) — system overview + event system
549
+ - [API (Python)](docs/api.md) — how to use the public API
550
+ - [API Reference](docs/api-reference.md) — Python API (including events)
551
+ - [Server](docs/server.md) — OpenAI-compatible gateway with tool/media support
552
+ - [CLI Guide](docs/acore-cli.md) — interactive `abstractcore-chat` walkthrough
553
+
554
+ Project:
555
+ - [Changelog](CHANGELOG.md) — version history and upgrade notes
556
+ - [Contributing](CONTRIBUTING.md) — dev setup and contribution guidelines
557
+ - [Security](SECURITY.md) — responsible vulnerability reporting
558
+ - [Acknowledgements](ACKNOWLEDGEMENTS.md) — upstream projects and communities
559
+
560
+ ## License
561
+
562
+ MIT