multillm-core 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
multillm/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ from .exceptions import MultiLLMError
2
+ from .factory import create_client
3
+ from .providers.base import BaseLLMClient
4
+ from .schemas import LLMResponse
5
+ from .version import __version__
6
+
7
+ __all__ = [
8
+ "create_client",
9
+ "BaseLLMClient",
10
+ "LLMResponse",
11
+ "MultiLLMError",
12
+ "__version__",
13
+ ]
multillm/exceptions.py ADDED
@@ -0,0 +1,46 @@
1
class MultiLLMError(Exception):
    """Root of the multillm exception hierarchy.

    Catching this single type traps every error the package raises.
    """
5
+
6
+
7
class ProviderNotFoundError(MultiLLMError):
    """Signals that no client is registered for the requested provider name."""
11
+
12
+
13
class SDKNotInstalledError(MultiLLMError):
    """Signals that the SDK backing a provider is absent from the environment."""

    def __init__(self, provider: str, package_name: str):
        """
        Args:
            provider: Name of the provider whose SDK is missing.
            package_name: Extra name to use in the pip install hint.
        """
        self.provider = provider
        self.package_name = package_name
        message = (
            f"SDK for '{provider}' is not installed. Please install it using: "
            f"pip install 'multillm-core[{package_name}]'"
        )
        super().__init__(message)
23
+
24
+
25
class APIError(MultiLLMError):
    """Common base for errors reported by a provider's API."""
29
+
30
+
31
class AuthenticationError(APIError):
    """Signals that the provider rejected the supplied credentials."""
35
+
36
+
37
class RateLimitError(APIError):
    """Signals that the provider's rate limit was exceeded."""
41
+
42
+
43
class InvalidRequestError(APIError):
    """Signals that the provider rejected the request as malformed."""
multillm/factory.py ADDED
@@ -0,0 +1,39 @@
1
+ from typing import Any
2
+
3
+ from .exceptions import ProviderNotFoundError
4
+ from .providers.anthropic import AnthropicClient
5
+ from .providers.base import BaseLLMClient
6
+ from .providers.gemini import GeminiClient
7
+ from .providers.openai import OpenAIClient
8
+
9
# Registry mapping a lowercase provider name to its client class.
# 'google' is an alias for the Gemini client. Insertion order is
# user-visible: create_client() joins the keys in this order when
# building its "Available providers" error message.
PROVIDER_MAP: dict[str, type[BaseLLMClient]] = {
    "openai": OpenAIClient,
    "anthropic": AnthropicClient,
    "gemini": GeminiClient,
    "google": GeminiClient,
}
15
+
16
+
17
def create_client(provider: str, **kwargs: Any) -> BaseLLMClient:
    """
    Build a client for the named LLM provider.

    Args:
        provider: Provider identifier, matched case-insensitively
            ('openai', 'anthropic', 'gemini', or the alias 'google').
        **kwargs: Forwarded verbatim to the client constructor
            (e.g. api_key).

    Returns:
        A ready-to-use BaseLLMClient instance.

    Raises:
        ProviderNotFoundError: If no client is registered for *provider*.
    """
    normalized = provider.lower()
    client_cls = PROVIDER_MAP.get(normalized)
    if client_cls is None:
        raise ProviderNotFoundError(
            f"Provider '{provider}' is not supported. "
            f"Available providers: {', '.join(PROVIDER_MAP.keys())}"
        )
    return client_cls(**kwargs)
@@ -0,0 +1,3 @@
1
+ from .base import BaseLLMClient
2
+
3
+ __all__ = ["BaseLLMClient"]
@@ -0,0 +1,59 @@
1
+ from typing import Any, NoReturn, Optional
2
+
3
+ from ..exceptions import (
4
+ APIError,
5
+ AuthenticationError,
6
+ InvalidRequestError,
7
+ RateLimitError,
8
+ SDKNotInstalledError,
9
+ )
10
+ from ..schemas import LLMResponse, TokenUsage
11
+ from .base import BaseLLMClient
12
+
13
+
14
class AnthropicClient(BaseLLMClient):
    """Anthropic provider client built on the official async SDK.

    Requires the optional `anthropic` package
    (install via `pip install 'multillm-core[anthropic]'`).
    """

    def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
        """
        Args:
            api_key: Anthropic API key; when None the SDK falls back to its
                own environment-based lookup.
            **kwargs: Forwarded to anthropic.AsyncAnthropic.

        Raises:
            SDKNotInstalledError: If the `anthropic` package is missing.
        """
        try:
            import anthropic
        except ImportError as exc:
            raise SDKNotInstalledError("anthropic", "anthropic") from exc
        # Constructed outside the try so constructor errors (e.g. bad kwargs)
        # are not misreported as a missing SDK.
        self._client = anthropic.AsyncAnthropic(api_key=api_key, **kwargs)

    async def generate(self, model: str, prompt: str, **kwargs: Any) -> LLMResponse:
        """
        Generate a single-turn completion via the Messages API.

        Args:
            model: Anthropic model name.
            prompt: User prompt, sent as one user message.
            **kwargs: Extra Messages API parameters; `max_tokens` defaults
                to 4096 when not supplied.

        Returns:
            LLMResponse with the concatenated text content and token usage.

        Raises:
            AuthenticationError / RateLimitError / InvalidRequestError /
            APIError: Mapped from the SDK's exception types.
        """
        try:
            # Copy so the caller's kwargs dict is never mutated.
            params = dict(kwargs)
            max_tokens = params.pop("max_tokens", 4096)
            response = await self._client.messages.create(
                model=model,
                max_tokens=max_tokens,
                messages=[{"role": "user", "content": prompt}],
                **params,
            )

            # Join every text-bearing content block rather than indexing
            # content[0].text, which raises when the first block is not a
            # text block (e.g. tool use) or the list is empty.
            text = "".join(
                block.text for block in response.content if hasattr(block, "text")
            )

            usage = TokenUsage(
                input_tokens=response.usage.input_tokens,
                output_tokens=response.usage.output_tokens,
                total_tokens=response.usage.input_tokens
                + response.usage.output_tokens,
            )

            return LLMResponse(
                text=text, provider="anthropic", model=model, usage=usage, raw=response
            )
        except Exception as e:
            return self._handle_error(e)

    def _handle_error(self, e: Exception) -> NoReturn:
        """Translate an SDK/unknown exception into the multillm hierarchy."""
        import anthropic

        # Order matters: specific SDK error types before the generic APIError.
        if isinstance(e, anthropic.AuthenticationError):
            raise AuthenticationError(str(e)) from e
        if isinstance(e, anthropic.RateLimitError):
            raise RateLimitError(str(e)) from e
        if isinstance(e, anthropic.BadRequestError):
            raise InvalidRequestError(str(e)) from e
        if isinstance(e, anthropic.APIError):
            raise APIError(str(e)) from e
        raise APIError(f"Unexpected Anthropic error: {str(e)}") from e
@@ -0,0 +1,42 @@
1
+ from abc import ABC, abstractmethod
2
+ from collections.abc import AsyncGenerator
3
+ from typing import Any
4
+
5
+ from ..schemas import LLMResponse
6
+
7
+
8
class BaseLLMClient(ABC):
    """Abstract interface shared by all LLM provider clients."""

    @abstractmethod
    async def generate(self, model: str, prompt: str, **kwargs: Any) -> LLMResponse:
        """
        Generate a response from the LLM.

        Args:
            model: The model name to use.
            prompt: The input text prompt.
            **kwargs: Provider-specific arguments.

        Returns:
            LLMResponse object.
        """

    async def stream(
        self, model: str, prompt: str, **kwargs: Any
    ) -> AsyncGenerator[str, None]:
        """
        Stream back the response from the LLM.

        Providers that support streaming override this method; the default
        implementation raises on the first iteration.

        Args:
            model: The model name to use.
            prompt: The input text prompt.
            **kwargs: Provider-specific arguments.

        Yields:
            Text chunks of the response.

        Raises:
            NotImplementedError: Always, unless overridden by a subclass.
        """
        raise NotImplementedError(
            f"Streaming is not implemented for {self.__class__.__name__}"
        )
        # Unreachable yield makes this a true async generator, matching the
        # declared AsyncGenerator return type. Without it, `async def` with
        # no yield returns a plain coroutine, and `async for` over the
        # default would fail with "'coroutine' object is not an async
        # iterable" instead of raising NotImplementedError.
        yield  # pragma: no cover
@@ -0,0 +1,46 @@
1
+ from typing import Any, Optional
2
+
3
+ from ..exceptions import APIError, SDKNotInstalledError
4
+ from ..schemas import LLMResponse, TokenUsage
5
+ from .base import BaseLLMClient
6
+
7
+
8
class GeminiClient(BaseLLMClient):
    """Google Gemini provider client built on the `google-genai` SDK.

    Requires the optional `google-genai` package
    (install via `pip install 'multillm-core[gemini]'`).
    """

    def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
        """
        Args:
            api_key: Gemini API key; when None the SDK falls back to its own
                environment-based lookup.
            **kwargs: Forwarded to genai.Client.

        Raises:
            SDKNotInstalledError: If the `google-genai` package is missing.
        """
        try:
            from google import genai
        except ImportError as exc:
            raise SDKNotInstalledError("google-genai", "gemini") from exc
        # Constructed outside the try so constructor errors (e.g. bad kwargs)
        # are not misreported as a missing SDK.
        self._client = genai.Client(api_key=api_key, **kwargs)

    async def generate(self, model: str, prompt: str, **kwargs: Any) -> LLMResponse:
        """
        Generate a response via the Gemini generate_content API.

        Args:
            model: Gemini model name.
            prompt: Input text, passed as `contents`.
            **kwargs: Extra generate_content parameters.

        Returns:
            LLMResponse with the response text and token usage (zeroed when
            the SDK reports no usage metadata).

        Raises:
            APIError: Wrapping any error raised by the SDK call.
        """
        try:
            # The google-genai SDK exposes async calls under client.aio.
            response = await self._client.aio.models.generate_content(
                model=model, contents=prompt, **kwargs
            )

            text = response.text or ""
            usage = TokenUsage()

            usage_metadata = getattr(response, "usage_metadata", None)
            if usage_metadata:
                # The SDK reports these counts as Optional ints; an attribute
                # can exist and still be None, in which case getattr's default
                # is not used. Coalesce with `or 0` so TokenUsage's required
                # int fields always validate.
                usage = TokenUsage(
                    input_tokens=getattr(usage_metadata, "prompt_token_count", 0)
                    or 0,
                    output_tokens=getattr(usage_metadata, "candidates_token_count", 0)
                    or 0,
                    total_tokens=getattr(usage_metadata, "total_token_count", 0)
                    or 0,
                )

            return LLMResponse(
                text=text, provider="gemini", model=model, usage=usage, raw=response
            )
        except Exception as e:
            # Chain the original SDK error for debuggability.
            raise APIError(f"Gemini error: {str(e)}") from e
@@ -0,0 +1,57 @@
1
+ from typing import Any, NoReturn, Optional
2
+
3
+ from ..exceptions import (
4
+ APIError,
5
+ AuthenticationError,
6
+ InvalidRequestError,
7
+ RateLimitError,
8
+ SDKNotInstalledError,
9
+ )
10
+ from ..schemas import LLMResponse, TokenUsage
11
+ from .base import BaseLLMClient
12
+
13
+
14
class OpenAIClient(BaseLLMClient):
    """OpenAI provider client built on the official async SDK.

    Requires the optional `openai` package
    (install via `pip install 'multillm-core[openai]'`).
    """

    def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
        """
        Args:
            api_key: OpenAI API key; when None the SDK falls back to its own
                environment-based lookup.
            **kwargs: Forwarded to openai.AsyncOpenAI.

        Raises:
            SDKNotInstalledError: If the `openai` package is missing.
        """
        try:
            import openai
        except ImportError as exc:
            raise SDKNotInstalledError("openai", "openai") from exc
        # Constructed outside the try so constructor errors (e.g. bad kwargs)
        # are not misreported as a missing SDK.
        self._client = openai.AsyncOpenAI(api_key=api_key, **kwargs)

    async def generate(self, model: str, prompt: str, **kwargs: Any) -> LLMResponse:
        """
        Generate a single-turn chat completion.

        Args:
            model: OpenAI model name.
            prompt: User prompt, sent as one user message.
            **kwargs: Extra Chat Completions API parameters.

        Returns:
            LLMResponse with the first choice's text and token usage.

        Raises:
            AuthenticationError / RateLimitError / InvalidRequestError /
            APIError: Mapped from the SDK's exception types.
        """
        try:
            response = await self._client.chat.completions.create(
                model=model, messages=[{"role": "user", "content": prompt}], **kwargs
            )

            choice = response.choices[0]
            text = choice.message.content or ""

            # `usage` is Optional in the SDK response model; fall back to
            # zeroed counters instead of raising AttributeError on None.
            usage = TokenUsage()
            if response.usage is not None:
                usage = TokenUsage(
                    input_tokens=response.usage.prompt_tokens,
                    output_tokens=response.usage.completion_tokens,
                    total_tokens=response.usage.total_tokens,
                )

            return LLMResponse(
                text=text, provider="openai", model=model, usage=usage, raw=response
            )
        except Exception as e:
            return self._handle_error(e)

    def _handle_error(self, e: Exception) -> NoReturn:
        """Translate an SDK/unknown exception into the multillm hierarchy."""
        import openai

        # Order matters: specific SDK error types before the generic APIError.
        if isinstance(e, openai.AuthenticationError):
            raise AuthenticationError(str(e)) from e
        if isinstance(e, openai.RateLimitError):
            raise RateLimitError(str(e)) from e
        if isinstance(e, openai.BadRequestError):
            raise InvalidRequestError(str(e)) from e
        if isinstance(e, openai.APIError):
            raise APIError(str(e)) from e
        raise APIError(f"Unexpected OpenAI error: {str(e)}") from e
multillm/schemas.py ADDED
@@ -0,0 +1,23 @@
1
+ from typing import Any, Optional
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+
6
class TokenUsage(BaseModel):
    """Token usage statistics."""

    # Tokens consumed by the prompt/input side of the request.
    input_tokens: int = Field(default=0, description="Number of tokens in the input.")
    # Tokens produced in the completion/output.
    output_tokens: int = Field(default=0, description="Number of tokens in the output.")
    # Provider-reported total; None when the provider does not supply one.
    total_tokens: Optional[int] = Field(default=None, description="Total tokens used.")
12
+
13
+
14
class LLMResponse(BaseModel):
    """Unified response model for all LLM providers."""

    text: str = Field(..., description="The generated text response.")
    provider: str = Field(..., description="The name of the provider.")
    model: str = Field(..., description="The model name used.")
    # Defaults to an all-zero TokenUsage when the provider reports nothing.
    usage: TokenUsage = Field(
        default_factory=TokenUsage, description="Token usage details."
    )
    # Provider SDK response object; typed Any, so it is stored unvalidated.
    raw: Any = Field(None, description="The raw response from the provider's SDK.")
multillm/version.py ADDED
@@ -0,0 +1 @@
1
# Single source of truth for the package version (re-exported as
# multillm.__version__); keep in sync with the distribution metadata.
__version__ = "0.1.0"
@@ -0,0 +1,69 @@
1
+ Metadata-Version: 2.4
2
+ Name: multillm-core
3
+ Version: 0.1.0
4
+ Summary: A unified interface for multiple LLM providers
5
+ Author: Divas Rajan
6
+ License: MIT
7
+ Classifier: License :: OSI Approved :: MIT License
8
+ Classifier: Operating System :: OS Independent
9
+ Classifier: Programming Language :: Python :: 3
10
+ Requires-Python: >=3.9
11
+ Requires-Dist: google-genai>=1.47.0
12
+ Requires-Dist: pydantic>=2.0.0
13
+ Provides-Extra: all
14
+ Requires-Dist: anthropic>=0.10.0; extra == 'all'
15
+ Requires-Dist: google-genai>=1.47.0; extra == 'all'
16
+ Requires-Dist: openai>=1.0.0; extra == 'all'
17
+ Provides-Extra: anthropic
18
+ Requires-Dist: anthropic>=0.10.0; extra == 'anthropic'
19
+ Provides-Extra: gemini
20
+ Requires-Dist: google-genai>=1.47.0; extra == 'gemini'
21
+ Provides-Extra: openai
22
+ Requires-Dist: openai>=1.0.0; extra == 'openai'
23
+ Description-Content-Type: text/markdown
24
+
25
+ # multillm-core
26
+
27
+ A unified interface for multiple LLM providers (OpenAI, Anthropic, Gemini).
28
+
29
+ ## Installation
30
+
31
+ ```bash
32
+ pip install multillm-core
33
+ ```
34
+
35
+ ### Install with specific providers:
36
+
37
+ ```bash
38
+ pip install "multillm-core[openai]"
39
+ pip install "multillm-core[anthropic]"
40
+ pip install "multillm-core[gemini]"
41
+ pip install "multillm-core[all]"
42
+ ```
43
+
44
+ ## Example Usage
45
+
46
+ ```python
47
+ import asyncio
48
+ from multillm import create_client
49
+
50
+ async def main():
51
+ client = create_client("openai", api_key="your-key")
52
+ response = await client.generate(
53
+ model="gpt-4o",
54
+ prompt="Explain quantum entanglement in one sentence."
55
+ )
56
+ print(f"[{response.provider}] {response.text}")
57
+ print(f"Usage: {response.usage}")
58
+
59
+ if __name__ == "__main__":
60
+ asyncio.run(main())
61
+ ```
62
+
63
+ ## Supported Providers
64
+ - OpenAI
65
+ - Anthropic
66
+ - Google Gemini
67
+
68
+ ## License
69
+ MIT
@@ -0,0 +1,13 @@
1
+ multillm/__init__.py,sha256=wCArBQoBBqzJrNy5oknONDCVqKshYKl7PZk719iGQns,297
2
+ multillm/exceptions.py,sha256=rbFKQLRjLp8zSMwZK3VVRSUMzaPBFTifxzZU0fzghcQ,983
3
+ multillm/factory.py,sha256=DZHUMCX6wDfwyL-C_GDrbHCs-9oddbAqEVY2YyYn7P0,1175
4
+ multillm/schemas.py,sha256=cI-tIkpnEmcHt6PdGUuCkWOn4kedtfta4YsNRbWRXaY,890
5
+ multillm/version.py,sha256=kUR5RAFc7HCeiqdlX36dZOHkUI5wI6V_43RpEcD8b-0,22
6
+ multillm/providers/__init__.py,sha256=EvfbkIzjIgrtqU9jzs9dvp7eZzwpaagXlZ6xONZhlTY,61
7
+ multillm/providers/anthropic.py,sha256=gdeR0q_jaUopAvNiJEcgECMXR3DNznRR06qgCRK_jCQ,2050
8
+ multillm/providers/base.py,sha256=qAooArEj93ke7KAwTvMD7NiS5bP5W5JVC8tDPONDaVc,1106
9
+ multillm/providers/gemini.py,sha256=KOg2i34UNn5VCOnDL9tQSOvi5hCX5lqhyezQLo1Uhrw,1686
10
+ multillm/providers/openai.py,sha256=x0uml603T4JDST89ZMwGXGRGZC6gxIjP0bna8gDMJlo,1895
11
+ multillm_core-0.1.0.dist-info/METADATA,sha256=9iXHebRMtSUDP8VtRsko_zhMYJUBl7wvpyqYOJB6fxs,1670
12
+ multillm_core-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
13
+ multillm_core-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.29.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any