abstractcore-2.6.3-py3-none-any.whl → abstractcore-2.6.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -506,7 +506,17 @@ class ChatCompletionRequest(BaseModel):
         "Use 'auto' for automatic format detection based on model and user-agent.",
         example="auto"
     )
-
+
+    # Provider-specific parameters (AbstractCore-specific feature)
+    base_url: Optional[str] = Field(
+        default=None,
+        description="Base URL for the provider API endpoint (AbstractCore-specific feature). "
+        "Useful for openai-compatible provider to connect to custom endpoints. "
+        "Example: 'http://localhost:1234/v1' for LMStudio, 'http://localhost:8080/v1' for llama.cpp. "
+        "If not specified, uses provider's default or environment variable.",
+        example="http://localhost:1234/v1"
+    )
+
     class Config:
         schema_extra = {
             "examples": {
@@ -2007,7 +2017,17 @@ async def process_chat_completion(
     )
 
     # Create LLM instance
-    llm = create_llm(provider, model=model)
+    # Prepare provider-specific kwargs
+    provider_kwargs = {}
+    if request.base_url:
+        provider_kwargs["base_url"] = request.base_url
+        logger.info(
+            "🔗 Custom Base URL",
+            request_id=request_id,
+            base_url=request.base_url
+        )
+
+    llm = create_llm(provider, model=model, **provider_kwargs)
 
     # Convert messages
     messages = convert_to_abstractcore_messages(processed_messages)
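The same `base_url` passthrough can be used when calling AbstractCore directly from Python, which is what the server code above does on the caller's behalf. A minimal sketch: `create_llm` and the `base_url` keyword appear in this diff, while the top-level import path, provider name, model, and the `generate()` call are assumptions for illustration.

```python
# Illustrative direct use of create_llm with a custom OpenAI-compatible endpoint.
# Provider name, model, and generate() are assumed; the base_url kwarg mirrors the server code above.
from abstractcore import create_llm

llm = create_llm(
    "lmstudio",                           # or another provider that honors base_url
    model="qwen2.5-7b-instruct",
    base_url="http://localhost:1234/v1",  # custom endpoint instead of the default
)
print(llm.generate("Summarize this release in one sentence."))
```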
@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not av
 
 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.6.3"
+__version__ = "2.6.6"
@@ -22,9 +22,15 @@ References:
 import math
 from typing import Tuple, Dict, Any, Optional, List
 from pathlib import Path
-from PIL import Image
 import logging
 
+try:
+    from PIL import Image
+except ImportError as e:
+    raise ImportError(
+        "PIL (Pillow) is required for VLM token calculation. Install with: pip install Pillow"
+    ) from e
+
 from ..utils.structured_logging import get_logger
 from ..architectures.detection import get_model_capabilities, detect_architecture
 
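Because the guard re-raises `ImportError` with an actionable message at import time, downstream code that treats Pillow as optional can branch on the import instead of failing deep inside a call. A minimal sketch; only the module path comes from this diff, the flag and fallback behavior are hypothetical:

```python
# Optional-dependency handling sketch around the Pillow-guarded module.
try:
    from abstractcore.utils import vlm_token_calculator  # raises ImportError without Pillow
    HAVE_VLM_TOKENS = True
except ImportError as exc:
    HAVE_VLM_TOKENS = False  # continue without image token accounting
    print(f"VLM token calculation disabled: {exc}")

print("VLM token calculation available:", HAVE_VLM_TOKENS)
```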
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.6.3
+Version: 2.6.6
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -47,6 +47,8 @@ Provides-Extra: mlx
 Requires-Dist: mlx<1.0.0,>=0.15.0; extra == "mlx"
 Requires-Dist: mlx-lm<1.0.0,>=0.15.0; extra == "mlx"
 Requires-Dist: outlines>=0.1.0; extra == "mlx"
+Provides-Extra: vllm
+Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "vllm"
 Provides-Extra: embeddings
 Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "embeddings"
 Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
@@ -62,21 +64,36 @@ Requires-Dist: pymupdf4llm<1.0.0,>=0.0.20; extra == "media"
 Requires-Dist: unstructured[office]<1.0.0,>=0.10.0; extra == "media"
 Requires-Dist: pandas<3.0.0,>=1.0.0; extra == "media"
 Provides-Extra: compression
+Requires-Dist: abstractcore[media]; extra == "compression"
 Requires-Dist: pdf2image<2.0.0,>=1.16.0; extra == "compression"
 Provides-Extra: api-providers
 Requires-Dist: abstractcore[anthropic,openai]; extra == "api-providers"
 Provides-Extra: local-providers
 Requires-Dist: abstractcore[lmstudio,mlx,ollama]; extra == "local-providers"
-Provides-Extra: local-providers-non-mlx
-Requires-Dist: abstractcore[lmstudio,ollama]; extra == "local-providers-non-mlx"
+Provides-Extra: local-providers-apple
+Requires-Dist: abstractcore[lmstudio,mlx,ollama]; extra == "local-providers-apple"
+Provides-Extra: local-providers-gpu
+Requires-Dist: abstractcore[lmstudio,ollama,vllm]; extra == "local-providers-gpu"
+Provides-Extra: gpu-providers
+Requires-Dist: abstractcore[huggingface,vllm]; extra == "gpu-providers"
 Provides-Extra: heavy-providers
 Requires-Dist: abstractcore[huggingface]; extra == "heavy-providers"
 Provides-Extra: all-providers
-Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers"
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai,vllm]; extra == "all-providers"
+Provides-Extra: all-providers-apple
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers-apple"
+Provides-Extra: all-providers-gpu
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,ollama,openai,vllm]; extra == "all-providers-gpu"
 Provides-Extra: all-providers-non-mlx
 Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,ollama,openai]; extra == "all-providers-non-mlx"
+Provides-Extra: local-providers-non-mlx
+Requires-Dist: abstractcore[lmstudio,ollama]; extra == "local-providers-non-mlx"
 Provides-Extra: all
-Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools,vllm]; extra == "all"
+Provides-Extra: all-apple
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all-apple"
+Provides-Extra: all-gpu
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,ollama,openai,processing,server,test,tools,vllm]; extra == "all-gpu"
 Provides-Extra: all-non-mlx
 Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,ollama,openai,processing,server,test,tools]; extra == "all-non-mlx"
 Provides-Extra: lightweight
@@ -480,8 +497,9 @@ if response.metadata and response.metadata.get('compression_used'):
 ## Key Features
 
 - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
-- **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
-- **Async/Await Support** ⭐ NEW in v2.6.0: Native async support for concurrent requests with `asyncio.gather()` - works with all 6 providers
+- **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace, vLLM, and any OpenAI-compatible endpoint
+- **Async/Await Support** ⭐ NEW in v2.6.0: Native async support for concurrent requests with `asyncio.gather()` - works with all providers
+- **Dynamic Endpoint Configuration** ⭐ NEW in v2.6.5: Pass `base_url` in POST requests to connect to custom OpenAI-compatible endpoints without environment variables
 - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, timing, and trace correlation for debugging, trust, and compliance
 - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
 - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
@@ -498,14 +516,16 @@ if response.metadata and response.metadata.get('compression_used'):
 
 ## Supported Providers
 
-| Provider | Status | SEED Support | Setup |
-|----------|--------|-------------|-------|
-| **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
-| **Anthropic** | Full | Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
-| **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
-| **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
-| **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
-| **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| Provider | Status | SEED Support | Hardware | Setup |
+|----------|--------|-------------|----------|-------|
+| **OpenAI** | Full | Native | Any | [Get API key](docs/prerequisites.md#openai-setup) |
+| **Anthropic** | Full | Warning* | Any | [Get API key](docs/prerequisites.md#anthropic-setup) |
+| **Ollama** | Full | Native | Any | [Install guide](docs/prerequisites.md#ollama-setup) |
+| **LMStudio** | Full | Native | Any | [Install guide](docs/prerequisites.md#lmstudio-setup) |
+| **MLX** | Full | Native | **Apple Silicon only** | [Setup guide](docs/prerequisites.md#mlx-setup) |
+| **HuggingFace** | Full | Native | Any | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| **vLLM** | Full | Native | **NVIDIA CUDA only** | [Setup guide](docs/prerequisites.md#vllm-setup) |
+| **OpenAI-Compatible** ⭐ NEW | Full | Native | Any | Works with llama.cpp, text-generation-webui, LocalAI, etc. |
 
 *Anthropic doesn't support seed parameters but issues a warning when provided. Use `temperature=0.0` for more consistent outputs.
 
@@ -1024,7 +1044,8 @@ pip install abstractcore[anthropic]
 pip install abstractcore[ollama]
 pip install abstractcore[lmstudio]
 pip install abstractcore[huggingface]
-pip install abstractcore[mlx]   # macOS/Apple Silicon only
+pip install abstractcore[mlx]   # macOS/Apple Silicon only
+pip install abstractcore[vllm]  # NVIDIA CUDA only (Linux)
 
 # With server support
 pip install abstractcore[server]
@@ -1032,20 +1053,29 @@ pip install abstractcore[server]
 # With embeddings
 pip install abstractcore[embeddings]
 
-# Everything (recommended)
+# With compression (Glyph visual-text compression)
+pip install abstractcore[compression]
+
+# Everything (recommended for Apple Silicon)
 pip install abstractcore[all]
 
-# Cross-platform (all except MLX - for Linux/Windows)
+# Cross-platform (all except MLX/vLLM - for Linux/Windows/Intel Mac)
 pip install abstractcore[all-non-mlx]
 
 # Provider groups
-pip install abstractcore[all-providers]           # All providers (includes MLX)
-pip install abstractcore[all-providers-non-mlx]   # All providers except MLX
+pip install abstractcore[all-providers]           # All providers (includes MLX, excludes vLLM)
+pip install abstractcore[all-providers-non-mlx]   # All providers except MLX (excludes vLLM)
 pip install abstractcore[local-providers]         # Ollama, LMStudio, MLX
 pip install abstractcore[local-providers-non-mlx] # Ollama, LMStudio only
 pip install abstractcore[api-providers]           # OpenAI, Anthropic
+pip install abstractcore[gpu-providers]           # vLLM (NVIDIA CUDA only)
 ```
 
+**Hardware-Specific Notes:**
+- **MLX**: Requires Apple Silicon (M1/M2/M3/M4). Will not work on Intel Macs or other platforms.
+- **vLLM**: Requires NVIDIA GPUs with CUDA support. Will not work on Apple Silicon, AMD GPUs, or Intel integrated graphics.
+- **All other providers** (OpenAI, Anthropic, Ollama, LMStudio, HuggingFace): Work on any hardware.
+
 **Media processing extras:**
 ```bash
 # For PDF processing
@@ -60,14 +60,14 @@ abstractcore/media/processors/office_processor.py,sha256=_aTrrDtREiy6MivbANFc1FK
 abstractcore/media/processors/pdf_processor.py,sha256=qniYt7cTYYPVRi_cS1IsXztOldeY0bqdn7sdbELBU9k,17157
 abstractcore/media/processors/text_processor.py,sha256=D84QWxxIou4MeNhERmCTxi_p27CgicVFhMXJiujZgIE,21905
 abstractcore/media/utils/__init__.py,sha256=30-CTif91iRKOXJ4njGiduWAt-xp31U7NafMBNvgdO0,460
-abstractcore/media/utils/image_scaler.py,sha256=jPdYd65K5oeo0YaXjOkmv-tvs6tNHYwxJCZ29vZqFDg,11367
+abstractcore/media/utils/image_scaler.py,sha256=RWE2kPeURLEtJDK-Qs4KvZKtu-GkMLziFL9Vc9-aWjc,11388
 abstractcore/processing/__init__.py,sha256=QcACEnhnHKYCkFL1LNOW_uqBrwkTAmz5A61N4K2dyu0,988
 abstractcore/processing/basic_deepsearch.py,sha256=dzJQtH4k44XY9tvG0Z4JIlYt_s7HpbLdSPScha-t7vk,101036
 abstractcore/processing/basic_extractor.py,sha256=3x-3BdIHgLvqLnLF6K1-P4qVaLIpAnNIIutaJi7lDQM,49832
 abstractcore/processing/basic_intent.py,sha256=wD99Z7fE2RiYk6oyTZXojUbv-bz8HhKFIuIHYLLTw54,32455
 abstractcore/processing/basic_judge.py,sha256=L1fc9H0-_88B1TULL-mlaNL7OydMgp-ru_zzzoGdr38,37220
 abstractcore/processing/basic_summarizer.py,sha256=XHNxMQ_8aLStTeUo6_2JaThlct12Htpz7ORmm0iuJsg,25495
-abstractcore/providers/__init__.py,sha256=O7gmT4p_jbzMjoZPhi_6RIMHQm-IMFX1XfcgySz3DSQ,1729
+abstractcore/providers/__init__.py,sha256=dNz-KrUwpBZhEv6DkAe3t_V8w40_HjeME5j9VL0lDyo,1886
 abstractcore/providers/anthropic_provider.py,sha256=0-qZb0Es6-VLuVVl2j7IUjOuyRlgjQdJFulWfpi4qb4,31740
 abstractcore/providers/base.py,sha256=nWF1pxeUlT4ozlUqKG0rWOmLkfo-zQgfU7fv3AUSI08,68452
 abstractcore/providers/huggingface_provider.py,sha256=v4UUmODrnWKtTygzPh-lm4jSCAPms5VYJE5v7PWB4Lo,79458
@@ -75,11 +75,13 @@ abstractcore/providers/lmstudio_provider.py,sha256=92_vx7AVVt_oufJdHo3R0D_V2qyTK
 abstractcore/providers/mlx_provider.py,sha256=afLCEwuw7r8OK4fD3OriyKMcWpxVIob_37ItmgAclfc,23123
 abstractcore/providers/model_capabilities.py,sha256=C4HIgvNTp9iIPiDeWyXo7vdzRkMdecRPoQi80yHSOL0,11955
 abstractcore/providers/ollama_provider.py,sha256=Kg5au_tia0xFTXqUlqDNrSvwVpt2lXvfnVFou9K2FGQ,34144
+abstractcore/providers/openai_compatible_provider.py,sha256=PpBFOoPBMq2q2GNU0CpO_YAH1vl6MrBbapC05i1pNMA,35822
 abstractcore/providers/openai_provider.py,sha256=Y-79mAtgDiDw6SqF2LhnWtlfuC_e6TxeF_tqJWAAyWo,36364
-abstractcore/providers/registry.py,sha256=z0FVaucJ6KmE6QAIkskH56jV-7hodWgj_G-u5_bcgp0,19732
+abstractcore/providers/registry.py,sha256=gz2fu3m7EVYHWy9Lggbmjh46abrdnxC3_73ccI_MYMg,21598
 abstractcore/providers/streaming.py,sha256=HaGkoItPWXqgml3C-KiPc0hBNpztLzjl_ooECw11BHI,31370
+abstractcore/providers/vllm_provider.py,sha256=zl1utRG-G__Qh5UpgIEU-Dbb6w5LeZfiboBUC5aPeL0,33969
 abstractcore/server/__init__.py,sha256=1DSAz_YhQtnKv7sNi5TMQV8GFujctDOabgvAdilQE0o,249
-abstractcore/server/app.py,sha256=ajG4yfMOHjqBafquLHeLTaMmEr01RXiBRxjFRTIT2j8,96602
+abstractcore/server/app.py,sha256=AYupb0rlmf4-L9xDb7qHVLdUi8lGC-jYgg1Pe_AvZ3o,97507
 abstractcore/structured/__init__.py,sha256=VXRQHGcm-iaYnLOBPin2kyhvhhQA0kaGt_pcNDGsE_8,339
 abstractcore/structured/handler.py,sha256=hcUe_fZcwx0O3msLqFiOsj6-jbq3S-ZQa9c1nRIZvuo,24622
 abstractcore/structured/retry.py,sha256=BN_PvrWybyU1clMy2cult1-TVxFSMaVqiCPmmXvA5aI,3805
@@ -98,11 +100,11 @@ abstractcore/utils/self_fixes.py,sha256=1VYxPq-q7_DtNl39NbrzUmyHpkhb9Q2SdnXUj4c0
 abstractcore/utils/structured_logging.py,sha256=Vm-HviSa42G9DJCWmaEv4a0QG3NMsADD3ictLOs4En0,19952
 abstractcore/utils/token_utils.py,sha256=eLwFmJ68p9WMFD_MHLMmeJRW6Oqx_4hKELB8FNQ2Mnk,21097
 abstractcore/utils/trace_export.py,sha256=MD1DHDWltpewy62cYzz_OSPAA6edZbZq7_pZbvxz_H8,9279
-abstractcore/utils/version.py,sha256=MLDRpkxfuzOdSqP9RhPQPGU3g3_lGxn36uZBllC5LY8,605
-abstractcore/utils/vlm_token_calculator.py,sha256=VBmIji_oiqOQ13IvVhNkb8E246tYMIXWVVOnl86Ne94,27978
-abstractcore-2.6.3.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
-abstractcore-2.6.3.dist-info/METADATA,sha256=FSrM7xWLPCdW9mtHmMDB8alRirG1oJ4ieCswyDhZXdk,43479
-abstractcore-2.6.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-abstractcore-2.6.3.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
-abstractcore-2.6.3.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
-abstractcore-2.6.3.dist-info/RECORD,,
+abstractcore/utils/version.py,sha256=Z8uyHNTgXcYjIz0T3S0B0d_LcOXgKpyd_2WS8Rg3MtU,605
+abstractcore/utils/vlm_token_calculator.py,sha256=pIDc_iwJ4IrM_e8AxY2HDU8UdTYitBOXVhu0F-EaMTY,28144
+abstractcore-2.6.6.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
+abstractcore-2.6.6.dist-info/METADATA,sha256=Z4_SfHiSDLDXMDGLWdsd7cZASoZg2DtXgCQfnjrB6HA,45799
+abstractcore-2.6.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstractcore-2.6.6.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
+abstractcore-2.6.6.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
+abstractcore-2.6.6.dist-info/RECORD,,