eval-ai-library 0.3.2__py3-none-any.whl → 0.3.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of eval-ai-library might be problematic; consult the package's security advisory on the registry page for more details.

eval_lib/llm_client.py CHANGED
@@ -2,6 +2,7 @@
2
2
  import openai
3
3
  import functools
4
4
  import anthropic
5
+ from abc import ABC, abstractmethod
5
6
  from openai import AsyncAzureOpenAI
6
7
  from google import genai
7
8
  from google.genai.types import GenerateContentConfig
@@ -13,6 +14,45 @@ from types import SimpleNamespace
13
14
  from .price import model_pricing
14
15
 
15
16
 
17
+ class CustomLLMClient(ABC):
18
+ """
19
+ Base class for custom LLM clients.
20
+ Inherit from this to create your own model implementations.
21
+
22
+ Example:
23
+ class MyCustomLLM(CustomLLMClient):
24
+ async def chat_complete(self, messages, temperature):
25
+ # Your implementation
26
+ return response_text, cost
27
+
28
+ def get_model_name(self):
29
+ return "my-custom-model"
30
+ """
31
+
32
+ @abstractmethod
33
+ async def chat_complete(
34
+ self,
35
+ messages: list[dict[str, str]],
36
+ temperature: float
37
+ ) -> tuple[str, Optional[float]]:
38
+ """
39
+ Generate a response for the given messages.
40
+
41
+ Args:
42
+ messages: List of message dicts [{"role": "user", "content": "..."}]
43
+ temperature: Sampling temperature
44
+
45
+ Returns:
46
+ Tuple of (response_text, cost_in_usd)
47
+ """
48
+ pass
49
+
50
+ @abstractmethod
51
+ def get_model_name(self) -> str:
52
+ """Return the model name for logging/tracking purposes."""
53
+ pass
54
+
55
+
16
56
  class LLMConfigurationError(Exception):
17
57
  """Raised when LLM client configuration is missing or invalid."""
18
58
  pass
@@ -24,6 +64,7 @@ class Provider(str, Enum):
24
64
  GOOGLE = "google"
25
65
  OLLAMA = "ollama"
26
66
  ANTHROPIC = "anthropic"
67
+ CUSTOM = "custom"
27
68
 
28
69
 
29
70
  @dataclass(frozen=True, slots=True)
@@ -308,7 +349,7 @@ _HELPERS = {
308
349
 
309
350
 
310
351
  async def chat_complete(
311
- llm: str | tuple[str, str] | LLMDescriptor,
352
+ llm: str | tuple[str, str] | LLMDescriptor | CustomLLMClient,
312
353
  messages: list[dict[str, str]],
313
354
  temperature: float = 0.0,
314
355
  ):
@@ -327,6 +368,11 @@ async def chat_complete(
327
368
  LLMConfigurationError: If required API keys or configuration are missing
328
369
  ValueError: If provider is not supported
329
370
  """
371
+ # Handle custom LLM clients
372
+ if isinstance(llm, CustomLLMClient):
373
+ return await llm.chat_complete(messages, temperature)
374
+
375
+ # Standard providers
330
376
  llm = LLMDescriptor.parse(llm)
331
377
  helper = _HELPERS.get(llm.provider)
332
378