lionagi 0.15.13__py3-none-any.whl → 0.15.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/config.py +1 -0
- lionagi/service/connections/match_endpoint.py +9 -0
- lionagi/service/connections/providers/nvidia_nim_.py +100 -0
- lionagi/version.py +1 -1
- {lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/METADATA +1 -1
- {lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/RECORD +8 -7
- {lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/WHEEL +0 -0
- {lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/licenses/LICENSE +0 -0
lionagi/config.py
CHANGED
@@ -62,6 +62,7 @@ class AppSettings(BaseSettings, frozen=True):
     PERPLEXITY_API_KEY: SecretStr | None = None
     GROQ_API_KEY: SecretStr | None = None
     ANTHROPIC_API_KEY: SecretStr | None = None
+    NVIDIA_NIM_API_KEY: SecretStr | None = None
 
     OPENAI_DEFAULT_MODEL: str = "gpt-4.1-mini"
 
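The new NVIDIA_NIM_API_KEY field follows the same pattern as the other provider keys: an optional SecretStr that AppSettings is expected to pick up from the environment. A minimal sketch of reading it, assuming pydantic-settings loads fields from same-named environment variables (the settings sources themselves are not shown in this diff):

# Sketch, not part of the package: assumes AppSettings reads NVIDIA_NIM_API_KEY
# from the environment, as its BaseSettings/SecretStr declaration suggests.
import os

os.environ.setdefault("NVIDIA_NIM_API_KEY", "nvapi-placeholder")

from lionagi.config import settings  # module-level AppSettings instance

key = settings.NVIDIA_NIM_API_KEY
if key is not None:
    # SecretStr masks the value in repr/logs; unwrap it only where needed.
    print(key.get_secret_value()[:6])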
lionagi/service/connections/match_endpoint.py
CHANGED
@@ -47,6 +47,15 @@ def match_endpoint(
         from .providers.perplexity_ import PerplexityChatEndpoint
 
         return PerplexityChatEndpoint(None, **kwargs)
+    if provider == "nvidia_nim":
+        if "embed" in endpoint:
+            from .providers.nvidia_nim_ import NvidiaNimEmbedEndpoint
+
+            return NvidiaNimEmbedEndpoint(None, **kwargs)
+        if "chat" in endpoint or "completion" in endpoint:
+            from .providers.nvidia_nim_ import NvidiaNimChatEndpoint
+
+            return NvidiaNimChatEndpoint(None, **kwargs)
     if provider == "claude_code":
         if "cli" in endpoint:
             from .providers.claude_code_cli import ClaudeCodeCLIEndpoint
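With this branch in place, requesting the nvidia_nim provider routes on the endpoint string: anything containing "embed" returns NvidiaNimEmbedEndpoint, while "chat" or "completion" returns NvidiaNimChatEndpoint. A hypothetical call, assuming match_endpoint accepts provider and endpoint arguments as the hunk suggests (its full signature is not shown here):

# Illustrative only; argument names are inferred from the hunk above.
from lionagi.service.connections.match_endpoint import match_endpoint

chat = match_endpoint(provider="nvidia_nim", endpoint="chat/completions")
embed = match_endpoint(provider="nvidia_nim", endpoint="embeddings")

# "embed" in the endpoint string selects the embedding endpoint;
# "chat" or "completion" selects the chat endpoint.
print(type(chat).__name__, type(embed).__name__)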
lionagi/service/connections/providers/nvidia_nim_.py
ADDED
@@ -0,0 +1,100 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+NVIDIA NIM endpoint configurations.
+
+This module provides endpoint configurations for NVIDIA NIM (NVIDIA Inference Microservices),
+which offers GPU-accelerated inference for various AI models through an OpenAI-compatible API.
+
+NVIDIA NIM features:
+- OpenAI-compatible API endpoints
+- GPU-accelerated inference
+- Support for various open-source models (Llama, Mistral, etc.)
+- Both cloud-hosted and self-hosted options
+- Free tier with 1000 credits for development
+
+API Documentation: https://docs.nvidia.com/nim/
+Build Portal: https://build.nvidia.com/
+"""
+
+from lionagi.config import settings
+from lionagi.service.connections.endpoint import Endpoint
+from lionagi.service.connections.endpoint_config import EndpointConfig
+
+__all__ = (
+    "NvidiaNimChatEndpoint",
+    "NvidiaNimEmbedEndpoint",
+    "NVIDIA_NIM_CHAT_ENDPOINT_CONFIG",
+    "NVIDIA_NIM_EMBED_ENDPOINT_CONFIG",
+)
+
+
+def _get_nvidia_nim_config(**kwargs):
+    """Create NVIDIA NIM endpoint configuration with defaults.
+
+    NVIDIA NIM uses the integrate.api.nvidia.com endpoint for cloud-hosted models.
+    Authentication is via bearer token (API key from build.nvidia.com).
+    """
+    config = dict(
+        name="nvidia_nim_chat",
+        provider="nvidia_nim",
+        base_url="https://integrate.api.nvidia.com/v1",
+        endpoint="chat/completions",
+        kwargs={"model": "meta/llama3-8b-instruct"},  # Default model
+        api_key=settings.NVIDIA_NIM_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        requires_tokens=True,
+        # OpenAI-compatible format
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# Chat endpoint configuration
+NVIDIA_NIM_CHAT_ENDPOINT_CONFIG = _get_nvidia_nim_config()
+
+# Embedding endpoint configuration
+# Note: You'll need to verify which embedding models are available on NVIDIA NIM
+NVIDIA_NIM_EMBED_ENDPOINT_CONFIG = _get_nvidia_nim_config(
+    name="nvidia_nim_embed",
+    endpoint="embeddings",
+    kwargs={"model": "nvidia/nv-embed-v1"},  # Example embedding model
+)
+
+
+class NvidiaNimChatEndpoint(Endpoint):
+    """NVIDIA NIM chat completion endpoint.
+
+    Supports various open-source models including:
+    - meta/llama3-8b-instruct
+    - meta/llama3-70b-instruct
+    - meta/llama3.1-405b-instruct
+    - mistralai/mixtral-8x7b-instruct-v0.1
+    - google/gemma-7b
+    - And many more...
+
+    Get your API key from: https://build.nvidia.com/
+    """
+
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_nvidia_nim_config()
+        super().__init__(config, **kwargs)
+
+
+class NvidiaNimEmbedEndpoint(Endpoint):
+    """NVIDIA NIM embedding endpoint.
+
+    Note: Verify available embedding models at https://build.nvidia.com/
+    """
+
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_nvidia_nim_config(
+            name="nvidia_nim_embed",
+            endpoint="embeddings",
+            kwargs={"model": "nvidia/nv-embed-v1"},
+        )
+        super().__init__(config, **kwargs)
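The classes above only assemble an EndpointConfig (base URL, bearer auth, default model) and hand it to Endpoint; request execution lives in Endpoint itself and is outside this diff. A usage sketch that stays within what the new module defines, overriding the default chat model:

# Sketch; how Endpoint executes requests is not shown in this diff.
from lionagi.service.connections.providers.nvidia_nim_ import (
    NvidiaNimChatEndpoint,
    _get_nvidia_nim_config,
)

# Keep the integrate.api.nvidia.com base URL and bearer-auth defaults,
# but swap the default meta/llama3-8b-instruct model for a larger one.
config = _get_nvidia_nim_config(kwargs={"model": "meta/llama3-70b-instruct"})
endpoint = NvidiaNimChatEndpoint(config)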
lionagi/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.15.13"
+__version__ = "0.15.14"
{lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/RECORD
CHANGED
@@ -2,11 +2,11 @@ lionagi/__init__.py,sha256=HZmhhYeplh06qAJQ7cW8JUlJsqG_Q4JKJ3GGFW_ZQnw,682
 lionagi/_class_registry.py,sha256=pfUO1DjFZIqr3OwnNMkFqL_fiEBrrf8-swkGmP_KDLE,3112
 lionagi/_errors.py,sha256=ia_VWhPSyr5FIJLSdPpl04SrNOLI2skN40VC8ePmzeQ,3748
 lionagi/_types.py,sha256=j8XwSGeGrYwfmSJ8o-80bsfoalLWJgQH41ZkVevc4wk,75
-lionagi/config.py,sha256=
+lionagi/config.py,sha256=D13nnjpgJKz_LlQrzaKKVefm4hqesz_dP9ROjWmGuLE,3811
 lionagi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/settings.py,sha256=HDuKCEJCpc4HudKodBnhoQUGuTGhRHdlIFhbtf3VBtY,1633
 lionagi/utils.py,sha256=7Hlt60LiShRdNPXv4y2e_xq7W2N0PM5cGRJnc8qXAVw,28660
-lionagi/version.py,sha256=
+lionagi/version.py,sha256=twn0Vrxaz4hLyeNEgJYUkN06H8sXuoxF6BpefwWSUTU,24
 lionagi/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/adapters/_utils.py,sha256=n4DS27CZfC-0O_UFaYtlUdjiMx9IeYsGpP7MVaFO5ZA,885
 lionagi/adapters/async_postgres_adapter.py,sha256=OEJd9ie8prxRQK2_-W9qmdI3Sl6Q7xxRs7Vey16G3pQ,3172
@@ -203,12 +203,13 @@ lionagi/service/connections/api_calling.py,sha256=fY-fzwSJvQKpUT27TF0MTfE5TxroYK
 lionagi/service/connections/endpoint.py,sha256=UYjryqKF4uEaGzDP9vYrav-0Xr6tDR6J9FM_kwqR87Q,14832
 lionagi/service/connections/endpoint_config.py,sha256=6sA06uCzriT6p0kFxhDCFH8N6V6MVp8ytlOw5ctBhDI,5169
 lionagi/service/connections/header_factory.py,sha256=IYeTQQk7r8FXcdhmW7orCxHjNO-Nb1EOXhgNK7CAp-I,1821
-lionagi/service/connections/match_endpoint.py,sha256=
+lionagi/service/connections/match_endpoint.py,sha256=Df5v3bprnJq5CqOzuK0KzwawOIfAsGZZM4CnE-sliu4,2850
 lionagi/service/connections/providers/__init__.py,sha256=3lzOakDoBWmMaNnT2g-YwktPKa_Wme4lnPRSmOQfayY,105
 lionagi/service/connections/providers/anthropic_.py,sha256=vok8mIyFiuV3K83tOjdYfruA6cv1h_57ML6RtpuW-bU,3157
 lionagi/service/connections/providers/claude_code_.py,sha256=dix4VoR2YwRabJ0F8I3mV8sVtRQEquExqP6mfbE_rGk,10435
 lionagi/service/connections/providers/claude_code_cli.py,sha256=kqEOnCUOOh2O_3NGi6W7r-gdLsbW-Jcp11tm30VEv4Q,4455
 lionagi/service/connections/providers/exa_.py,sha256=kuWD7yyYRqIa4ChSn0TsxFA5V5LwvFUD-w8TZ6mx4rk,1048
+lionagi/service/connections/providers/nvidia_nim_.py,sha256=95vmo0DSONYBVHkR9SGJ5BiHNKFZNZBrjw4_7ShOXQA,3154
 lionagi/service/connections/providers/oai_.py,sha256=3x5d6Ei1hKu8Mix0N2V2K21O9dd-2jtAELHhHXj5iHk,6071
 lionagi/service/connections/providers/ollama_.py,sha256=oqYLWn81KrWoQgId4e4GD_bgrDjQLPOmhqlc5uBuFGk,4569
 lionagi/service/connections/providers/perplexity_.py,sha256=1GMmxAXsKGsB-xlqxO6hW-QdqoqkU2705NLyejetFSw,1646
@@ -235,7 +236,7 @@ lionagi/tools/types.py,sha256=XtJLY0m-Yi_ZLWhm0KycayvqMCZd--HxfQ0x9vFUYDE,230
 lionagi/tools/file/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/tools/file/reader.py,sha256=2YKgU3VKo76zfL_buDAUQJoPLC56f6WJ4_mdJjlMDIM,9509
 lionagi/tools/memory/tools.py,sha256=earYkKxSOz_iXkqVZYTEDfE3dwZYIWPXZrqQ1DYGz4I,15941
-lionagi-0.15.
-lionagi-0.15.
-lionagi-0.15.
-lionagi-0.15.
+lionagi-0.15.14.dist-info/METADATA,sha256=B9IMnGWdUDI43F5aDYmhldz7LynHcJ8mjRp_2xubKgk,23052
+lionagi-0.15.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.15.14.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.15.14.dist-info/RECORD,,
{lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/WHEEL
File without changes
{lionagi-0.15.13.dist-info → lionagi-0.15.14.dist-info}/licenses/LICENSE
File without changes