abstractcore-2.6.2-py3-none-any.whl → abstractcore-2.6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -506,7 +506,17 @@ class ChatCompletionRequest(BaseModel):
         "Use 'auto' for automatic format detection based on model and user-agent.",
         example="auto"
     )
-
+
+    # Provider-specific parameters (AbstractCore-specific feature)
+    base_url: Optional[str] = Field(
+        default=None,
+        description="Base URL for the provider API endpoint (AbstractCore-specific feature). "
+        "Useful for openai-compatible provider to connect to custom endpoints. "
+        "Example: 'http://localhost:1234/v1' for LMStudio, 'http://localhost:8080/v1' for llama.cpp. "
+        "If not specified, uses provider's default or environment variable.",
+        example="http://localhost:1234/v1"
+    )
+
     class Config:
         schema_extra = {
             "examples": {
@@ -2007,7 +2017,17 @@ async def process_chat_completion(
     )

     # Create LLM instance
-    llm = create_llm(provider, model=model)
+    # Prepare provider-specific kwargs
+    provider_kwargs = {}
+    if request.base_url:
+        provider_kwargs["base_url"] = request.base_url
+        logger.info(
+            "🔗 Custom Base URL",
+            request_id=request_id,
+            base_url=request.base_url
+        )
+
+    llm = create_llm(provider, model=model, **provider_kwargs)

     # Convert messages
     messages = convert_to_abstractcore_messages(processed_messages)
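
The handler change above simply forwards `request.base_url` into the factory call. The same thing can be done directly against the library; a minimal sketch, assuming the top-level `create_llm` export and the `generate()`/`.content` interface shown elsewhere in the README, with the "openai-compatible" provider name and the model string used purely as illustrative placeholders:

```python
from abstractcore import create_llm

# Direct equivalent of the provider_kwargs forwarding above; the provider name
# "openai-compatible" and the model string are illustrative assumptions.
llm = create_llm(
    "openai-compatible",
    model="my-local-model",
    base_url="http://localhost:1234/v1",  # forwarded just like request.base_url above
)

response = llm.generate("Say hello in one sentence.")
print(response.content)
```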
@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not av

 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.6.2"
+__version__ = "2.6.5"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.6.2
+Version: 2.6.5
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -47,6 +47,8 @@ Provides-Extra: mlx
 Requires-Dist: mlx<1.0.0,>=0.15.0; extra == "mlx"
 Requires-Dist: mlx-lm<1.0.0,>=0.15.0; extra == "mlx"
 Requires-Dist: outlines>=0.1.0; extra == "mlx"
+Provides-Extra: vllm
+Requires-Dist: vllm<1.0.0,>=0.6.0; extra == "vllm"
 Provides-Extra: embeddings
 Requires-Dist: sentence-transformers<6.0.0,>=5.1.0; extra == "embeddings"
 Requires-Dist: numpy<2.0.0,>=1.20.0; extra == "embeddings"
@@ -67,18 +69,26 @@ Provides-Extra: api-providers
 Requires-Dist: abstractcore[anthropic,openai]; extra == "api-providers"
 Provides-Extra: local-providers
 Requires-Dist: abstractcore[lmstudio,mlx,ollama]; extra == "local-providers"
-Provides-Extra: local-providers-non-mlx
-Requires-Dist: abstractcore[lmstudio,ollama]; extra == "local-providers-non-mlx"
+Provides-Extra: local-providers-apple
+Requires-Dist: abstractcore[lmstudio,mlx,ollama]; extra == "local-providers-apple"
+Provides-Extra: local-providers-gpu
+Requires-Dist: abstractcore[lmstudio,ollama,vllm]; extra == "local-providers-gpu"
+Provides-Extra: gpu-providers
+Requires-Dist: abstractcore[huggingface,vllm]; extra == "gpu-providers"
 Provides-Extra: heavy-providers
 Requires-Dist: abstractcore[huggingface]; extra == "heavy-providers"
 Provides-Extra: all-providers
-Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers"
-Provides-Extra: all-providers-non-mlx
-Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,ollama,openai]; extra == "all-providers-non-mlx"
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai,vllm]; extra == "all-providers"
+Provides-Extra: all-providers-apple
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,mlx,ollama,openai]; extra == "all-providers-apple"
+Provides-Extra: all-providers-gpu
+Requires-Dist: abstractcore[anthropic,embeddings,huggingface,lmstudio,ollama,openai,vllm]; extra == "all-providers-gpu"
 Provides-Extra: all
-Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all"
-Provides-Extra: all-non-mlx
-Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,ollama,openai,processing,server,test,tools]; extra == "all-non-mlx"
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools,vllm]; extra == "all"
+Provides-Extra: all-apple
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,mlx,ollama,openai,processing,server,test,tools]; extra == "all-apple"
+Provides-Extra: all-gpu
+Requires-Dist: abstractcore[anthropic,compression,dev,docs,embeddings,huggingface,lmstudio,media,ollama,openai,processing,server,test,tools,vllm]; extra == "all-gpu"
 Provides-Extra: lightweight
 Requires-Dist: abstractcore[anthropic,compression,embeddings,lmstudio,media,ollama,openai,processing,server,tools]; extra == "lightweight"
 Provides-Extra: dev
@@ -480,8 +490,9 @@ if response.metadata and response.metadata.get('compression_used'):
 ## Key Features

 - **Offline-First Design**: Built primarily for open source LLMs with full offline capability. Download once, run forever without internet access
-- **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace
-- **Async/Await Support** ⭐ NEW in v2.6.0: Native async support for concurrent requests with `asyncio.gather()` - works with all 6 providers
+- **Provider Agnostic**: Seamlessly switch between OpenAI, Anthropic, Ollama, LMStudio, MLX, HuggingFace, vLLM, and any OpenAI-compatible endpoint
+- **Async/Await Support** ⭐ NEW in v2.6.0: Native async support for concurrent requests with `asyncio.gather()` - works with all providers
+- **Dynamic Endpoint Configuration** ⭐ NEW in v2.6.5: Pass `base_url` in POST requests to connect to custom OpenAI-compatible endpoints without environment variables
 - **Interaction Tracing**: Complete LLM observability with programmatic access to prompts, responses, tokens, timing, and trace correlation for debugging, trust, and compliance
 - **Glyph Visual-Text Compression**: Revolutionary compression system that renders text as optimized images for 3-4x token compression and faster inference
 - **Centralized Configuration**: Global defaults and app-specific preferences at `~/.abstractcore/config/abstractcore.json`
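
The new "Dynamic Endpoint Configuration" entry above describes passing `base_url` in the request body instead of through environment variables. A minimal sketch from the client side, assuming a locally running AbstractCore server on port 8000 that exposes the usual OpenAI-style `/v1/chat/completions` route (host, port and path are assumptions), and leaving provider/model routing to the server's existing conventions:

```python
import requests

# Hypothetical local AbstractCore server; host, port and path are assumptions.
SERVER_URL = "http://localhost:8000/v1/chat/completions"

payload = {
    "model": "my-local-model",  # whatever the target endpoint actually serves
    "messages": [{"role": "user", "content": "Hello!"}],
    # New in 2.6.5: route this request to a custom OpenAI-compatible endpoint,
    # e.g. LMStudio on :1234 or llama.cpp on :8080, with no environment variables.
    "base_url": "http://localhost:1234/v1",
}

response = requests.post(SERVER_URL, json=payload, timeout=60)
response.raise_for_status()
print(response.json())
```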
@@ -498,14 +509,16 @@ if response.metadata and response.metadata.get('compression_used'):

 ## Supported Providers

-| Provider | Status | SEED Support | Setup |
-|----------|--------|-------------|-------|
-| **OpenAI** | Full | Native | [Get API key](docs/prerequisites.md#openai-setup) |
-| **Anthropic** | Full | Warning* | [Get API key](docs/prerequisites.md#anthropic-setup) |
-| **Ollama** | Full | Native | [Install guide](docs/prerequisites.md#ollama-setup) |
-| **LMStudio** | Full | Native | [Install guide](docs/prerequisites.md#lmstudio-setup) |
-| **MLX** | Full | Native | [Setup guide](docs/prerequisites.md#mlx-setup) |
-| **HuggingFace** | Full | Native | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| Provider | Status | SEED Support | Hardware | Setup |
+|----------|--------|-------------|----------|-------|
+| **OpenAI** | Full | Native | Any | [Get API key](docs/prerequisites.md#openai-setup) |
+| **Anthropic** | Full | Warning* | Any | [Get API key](docs/prerequisites.md#anthropic-setup) |
+| **Ollama** | Full | Native | Any | [Install guide](docs/prerequisites.md#ollama-setup) |
+| **LMStudio** | Full | Native | Any | [Install guide](docs/prerequisites.md#lmstudio-setup) |
+| **MLX** | Full | Native | **Apple Silicon only** | [Setup guide](docs/prerequisites.md#mlx-setup) |
+| **HuggingFace** | Full | Native | Any | [Setup guide](docs/prerequisites.md#huggingface-setup) |
+| **vLLM** | Full | Native | **NVIDIA CUDA only** | [Setup guide](docs/prerequisites.md#vllm-setup) |
+| **OpenAI-Compatible** ⭐ NEW | Full | Native | Any | Works with llama.cpp, text-generation-webui, LocalAI, etc. |

 *Anthropic doesn't support seed parameters but issues a warning when provided. Use `temperature=0.0` for more consistent outputs.

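
The table above advertises one calling convention across all providers. A rough sketch of switching backends with `create_llm`; the provider-name strings and model identifiers are illustrative assumptions and should be checked against the linked setup guides:

```python
from abstractcore import create_llm

# Same call shape regardless of backend; provider names and models are assumptions.
targets = [
    ("ollama", "llama3.1:8b"),
    ("openai", "gpt-4o-mini"),
    ("vllm", "Qwen/Qwen2.5-7B-Instruct"),
]

for provider, model in targets:
    llm = create_llm(provider, model=model)
    reply = llm.generate("Reply with the word 'pong'.")
    print(f"{provider}: {reply.content}")
```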
@@ -1024,7 +1037,8 @@ pip install abstractcore[anthropic]
 pip install abstractcore[ollama]
 pip install abstractcore[lmstudio]
 pip install abstractcore[huggingface]
-pip install abstractcore[mlx] # macOS/Apple Silicon only
+pip install abstractcore[mlx]  # macOS/Apple Silicon only
+pip install abstractcore[vllm] # NVIDIA CUDA only (Linux)

 # With server support
 pip install abstractcore[server]
@@ -1032,20 +1046,26 @@ pip install abstractcore[server]
 # With embeddings
 pip install abstractcore[embeddings]

-# Everything (recommended)
+# Everything (recommended for Apple Silicon)
 pip install abstractcore[all]

-# Cross-platform (all except MLX - for Linux/Windows)
+# Cross-platform (all except MLX/vLLM - for Linux/Windows/Intel Mac)
 pip install abstractcore[all-non-mlx]

 # Provider groups
-pip install abstractcore[all-providers]           # All providers (includes MLX)
-pip install abstractcore[all-providers-non-mlx]   # All providers except MLX
+pip install abstractcore[all-providers]           # All providers (includes MLX, excludes vLLM)
+pip install abstractcore[all-providers-non-mlx]   # All providers except MLX (excludes vLLM)
 pip install abstractcore[local-providers]         # Ollama, LMStudio, MLX
 pip install abstractcore[local-providers-non-mlx] # Ollama, LMStudio only
 pip install abstractcore[api-providers]           # OpenAI, Anthropic
+pip install abstractcore[gpu-providers]           # vLLM (NVIDIA CUDA only)
 ```

+**Hardware-Specific Notes:**
+- **MLX**: Requires Apple Silicon (M1/M2/M3/M4). Will not work on Intel Macs or other platforms.
+- **vLLM**: Requires NVIDIA GPUs with CUDA support. Will not work on Apple Silicon, AMD GPUs, or Intel integrated graphics.
+- **All other providers** (OpenAI, Anthropic, Ollama, LMStudio, HuggingFace): Work on any hardware.
+
 **Media processing extras:**
 ```bash
 # For PDF processing
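
The hardware notes added above (MLX on Apple Silicon, vLLM on NVIDIA CUDA, everything else anywhere) can be turned into a small runtime check when choosing a local provider. A rough sketch, not part of AbstractCore, that treats `nvidia-smi` on the PATH as a crude proxy for a usable CUDA GPU:

```python
import platform
import shutil

def pick_local_provider() -> str:
    """Pick a local provider name consistent with the hardware notes above."""
    # MLX only runs on Apple Silicon (Darwin + arm64).
    if platform.system() == "Darwin" and platform.machine() == "arm64":
        return "mlx"
    # vLLM needs an NVIDIA CUDA GPU; nvidia-smi on PATH is a rough indicator.
    if shutil.which("nvidia-smi"):
        return "vllm"
    # Ollama and LMStudio run on any hardware.
    return "ollama"

print(pick_local_provider())
```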
@@ -6,7 +6,7 @@ abstractcore/apps/app_config_utils.py,sha256=5GIvXnD996LFIV3-BpfkqII6UqYlStm7ZCg
 abstractcore/apps/deepsearch.py,sha256=UlmuBS9T4yNsz0V_iY08GNNDTstsI5OJNNV6c8CU6AE,23191
 abstractcore/apps/extractor.py,sha256=OfiqB9l_alH9xCGb6zOD__QJkDjdKOlLZngriVgmn7c,23749
 abstractcore/apps/intent.py,sha256=5ie_H9_K_ZxlA0oCu7ROUrsgwfzDNFgVUyBNec6YVRE,22813
-abstractcore/apps/judge.py,sha256=nOgxvn-BbhNY6xU9AlTeD1yidTh73AiVlSN7hQCVE2M,23169
+abstractcore/apps/judge.py,sha256=ZoBRGYjM24TrDALwV7MMDO4Cg2pGPtwRMXX5WyFhdVs,23840
 abstractcore/apps/summarizer.py,sha256=9aD6KH21w-tv_wGp9MaO2uyJuaU71OemW7KpqrG5t6w,14669
 abstractcore/architectures/__init__.py,sha256=-4JucAM7JkMWShWKkePoclxrUHRKgaG36UTguJihE0U,1046
 abstractcore/architectures/detection.py,sha256=jmpD04xcKotWCW7--jadBzCtD2a5dYJi1zljpxB9JmU,19813
@@ -35,7 +35,7 @@ abstractcore/core/enums.py,sha256=BhkVnHC-X1_377JDmqd-2mnem9GdBLqixWlYzlP_FJU,69
 abstractcore/core/factory.py,sha256=ec7WGW2JKK-dhDplziTAeRkebEUFymtEEZ_bS5qkpqY,2798
 abstractcore/core/interface.py,sha256=-VAY0nlsTnWN_WghiuMC7iE7xUdZfYOg6KlgrAPi14Y,14086
 abstractcore/core/retry.py,sha256=xP38rabBqJImZ-yg60L5mKeg80ATvxmLG5Yp6lCeTpk,14566
-abstractcore/core/session.py,sha256=n9StBlMhSlETlEqQ401PpM8lK0W2ycCP4Zwrywl4Mhs,46147
+abstractcore/core/session.py,sha256=pgiwwgfpgBovwqJ0RkWRsS5TbroTvQEB0jkZnzdhhCY,47278
 abstractcore/core/types.py,sha256=jj44i07kMjdg9FQ3mA_fK6r_M0Lcgt1RQpy1Ra5w-eI,4578
 abstractcore/embeddings/__init__.py,sha256=hR3xZyqcRm4c2pq1dIa5lxj_-Bk70Zad802JQN4joWo,637
 abstractcore/embeddings/manager.py,sha256=bisyQJquM1HLQor8ZAfO9V_XWWHw0b4PjyAz9g7sS-4,52273
@@ -65,9 +65,9 @@ abstractcore/processing/__init__.py,sha256=QcACEnhnHKYCkFL1LNOW_uqBrwkTAmz5A61N4
 abstractcore/processing/basic_deepsearch.py,sha256=dzJQtH4k44XY9tvG0Z4JIlYt_s7HpbLdSPScha-t7vk,101036
 abstractcore/processing/basic_extractor.py,sha256=3x-3BdIHgLvqLnLF6K1-P4qVaLIpAnNIIutaJi7lDQM,49832
 abstractcore/processing/basic_intent.py,sha256=wD99Z7fE2RiYk6oyTZXojUbv-bz8HhKFIuIHYLLTw54,32455
-abstractcore/processing/basic_judge.py,sha256=tKWJrg_tY4vCHzWgXxz0ZjgLXBYYfpMcpG7vl03hJcM,32218
+abstractcore/processing/basic_judge.py,sha256=L1fc9H0-_88B1TULL-mlaNL7OydMgp-ru_zzzoGdr38,37220
 abstractcore/processing/basic_summarizer.py,sha256=XHNxMQ_8aLStTeUo6_2JaThlct12Htpz7ORmm0iuJsg,25495
-abstractcore/providers/__init__.py,sha256=O7gmT4p_jbzMjoZPhi_6RIMHQm-IMFX1XfcgySz3DSQ,1729
+abstractcore/providers/__init__.py,sha256=dNz-KrUwpBZhEv6DkAe3t_V8w40_HjeME5j9VL0lDyo,1886
 abstractcore/providers/anthropic_provider.py,sha256=0-qZb0Es6-VLuVVl2j7IUjOuyRlgjQdJFulWfpi4qb4,31740
 abstractcore/providers/base.py,sha256=nWF1pxeUlT4ozlUqKG0rWOmLkfo-zQgfU7fv3AUSI08,68452
 abstractcore/providers/huggingface_provider.py,sha256=v4UUmODrnWKtTygzPh-lm4jSCAPms5VYJE5v7PWB4Lo,79458
@@ -75,11 +75,13 @@ abstractcore/providers/lmstudio_provider.py,sha256=92_vx7AVVt_oufJdHo3R0D_V2qyTK
 abstractcore/providers/mlx_provider.py,sha256=afLCEwuw7r8OK4fD3OriyKMcWpxVIob_37ItmgAclfc,23123
 abstractcore/providers/model_capabilities.py,sha256=C4HIgvNTp9iIPiDeWyXo7vdzRkMdecRPoQi80yHSOL0,11955
 abstractcore/providers/ollama_provider.py,sha256=Kg5au_tia0xFTXqUlqDNrSvwVpt2lXvfnVFou9K2FGQ,34144
+abstractcore/providers/openai_compatible_provider.py,sha256=PpBFOoPBMq2q2GNU0CpO_YAH1vl6MrBbapC05i1pNMA,35822
 abstractcore/providers/openai_provider.py,sha256=Y-79mAtgDiDw6SqF2LhnWtlfuC_e6TxeF_tqJWAAyWo,36364
-abstractcore/providers/registry.py,sha256=z0FVaucJ6KmE6QAIkskH56jV-7hodWgj_G-u5_bcgp0,19732
+abstractcore/providers/registry.py,sha256=gz2fu3m7EVYHWy9Lggbmjh46abrdnxC3_73ccI_MYMg,21598
 abstractcore/providers/streaming.py,sha256=HaGkoItPWXqgml3C-KiPc0hBNpztLzjl_ooECw11BHI,31370
+abstractcore/providers/vllm_provider.py,sha256=zl1utRG-G__Qh5UpgIEU-Dbb6w5LeZfiboBUC5aPeL0,33969
 abstractcore/server/__init__.py,sha256=1DSAz_YhQtnKv7sNi5TMQV8GFujctDOabgvAdilQE0o,249
-abstractcore/server/app.py,sha256=ajG4yfMOHjqBafquLHeLTaMmEr01RXiBRxjFRTIT2j8,96602
+abstractcore/server/app.py,sha256=AYupb0rlmf4-L9xDb7qHVLdUi8lGC-jYgg1Pe_AvZ3o,97507
 abstractcore/structured/__init__.py,sha256=VXRQHGcm-iaYnLOBPin2kyhvhhQA0kaGt_pcNDGsE_8,339
 abstractcore/structured/handler.py,sha256=hcUe_fZcwx0O3msLqFiOsj6-jbq3S-ZQa9c1nRIZvuo,24622
 abstractcore/structured/retry.py,sha256=BN_PvrWybyU1clMy2cult1-TVxFSMaVqiCPmmXvA5aI,3805
@@ -98,11 +100,11 @@ abstractcore/utils/self_fixes.py,sha256=1VYxPq-q7_DtNl39NbrzUmyHpkhb9Q2SdnXUj4c0
 abstractcore/utils/structured_logging.py,sha256=Vm-HviSa42G9DJCWmaEv4a0QG3NMsADD3ictLOs4En0,19952
 abstractcore/utils/token_utils.py,sha256=eLwFmJ68p9WMFD_MHLMmeJRW6Oqx_4hKELB8FNQ2Mnk,21097
 abstractcore/utils/trace_export.py,sha256=MD1DHDWltpewy62cYzz_OSPAA6edZbZq7_pZbvxz_H8,9279
-abstractcore/utils/version.py,sha256=5iDEPQNqduswVtnrBoEL5IucdXpHJ7JSu98Ep1PMYlM,605
+abstractcore/utils/version.py,sha256=bw_xqN48f_FOq8CRO369DqSiwKLeZ7sT8x-9FwxKe7Y,605
 abstractcore/utils/vlm_token_calculator.py,sha256=VBmIji_oiqOQ13IvVhNkb8E246tYMIXWVVOnl86Ne94,27978
-abstractcore-2.6.2.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
-abstractcore-2.6.2.dist-info/METADATA,sha256=ULSMViuMcjCj-wIgJJ_AksUq0BFRjf33L-vpWxo-iJ0,43479
-abstractcore-2.6.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-abstractcore-2.6.2.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
-abstractcore-2.6.2.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
-abstractcore-2.6.2.dist-info/RECORD,,
+abstractcore-2.6.5.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
+abstractcore-2.6.5.dist-info/METADATA,sha256=BAyyNyC_zudt7B3Oqc0eVczlzvuW9_5fV6BA-ay9Hic,45179
+abstractcore-2.6.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstractcore-2.6.5.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
+abstractcore-2.6.5.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
+abstractcore-2.6.5.dist-info/RECORD,,