levelapp 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of levelapp might be problematic; see the registry page for more details.
- levelapp/__init__.py +0 -0
- levelapp/aspects/__init__.py +8 -0
- levelapp/aspects/loader.py +253 -0
- levelapp/aspects/logger.py +59 -0
- levelapp/aspects/monitor.py +614 -0
- levelapp/aspects/sanitizer.py +168 -0
- levelapp/clients/__init__.py +119 -0
- levelapp/clients/anthropic.py +112 -0
- levelapp/clients/ionos.py +116 -0
- levelapp/clients/mistral.py +106 -0
- levelapp/clients/openai.py +102 -0
- levelapp/comparator/__init__.py +5 -0
- levelapp/comparator/comparator.py +232 -0
- levelapp/comparator/extractor.py +108 -0
- levelapp/comparator/schemas.py +61 -0
- levelapp/comparator/scorer.py +271 -0
- levelapp/comparator/utils.py +136 -0
- levelapp/config/__init__.py +5 -0
- levelapp/config/endpoint.py +190 -0
- levelapp/config/prompts.py +35 -0
- levelapp/core/__init__.py +0 -0
- levelapp/core/base.py +386 -0
- levelapp/core/session.py +214 -0
- levelapp/evaluator/__init__.py +3 -0
- levelapp/evaluator/evaluator.py +265 -0
- levelapp/metrics/__init__.py +67 -0
- levelapp/metrics/embedding.py +2 -0
- levelapp/metrics/exact.py +182 -0
- levelapp/metrics/fuzzy.py +80 -0
- levelapp/metrics/token.py +103 -0
- levelapp/plugins/__init__.py +0 -0
- levelapp/repository/__init__.py +3 -0
- levelapp/repository/firestore.py +282 -0
- levelapp/simulator/__init__.py +3 -0
- levelapp/simulator/schemas.py +89 -0
- levelapp/simulator/simulator.py +441 -0
- levelapp/simulator/utils.py +201 -0
- levelapp/workflow/__init__.py +5 -0
- levelapp/workflow/base.py +113 -0
- levelapp/workflow/factory.py +51 -0
- levelapp/workflow/registration.py +6 -0
- levelapp/workflow/schemas.py +121 -0
- levelapp-0.1.0.dist-info/METADATA +254 -0
- levelapp-0.1.0.dist-info/RECORD +46 -0
- levelapp-0.1.0.dist-info/WHEEL +4 -0
- levelapp-0.1.0.dist-info/licenses/LICENSE +0 -0
@@ -0,0 +1,168 @@
+"""'levelapp/aspects/sanitizers.py'"""
+import re
+import json
+from typing import Dict, Any, Callable
+
+
+class JSONSanitizer:
+    def __init__(
+        self,
+        type_conversions: Dict[str, Callable[[str], Any]] = None,
+        default_values: Dict[str, Any] = None,
+        remove_nulls: bool = True,
+        raise_on_missing: bool = False,
+    ):
+        """
+        Initialize the sanitizer with optional transformation rules.
+
+        Args:
+            type_conversions: Map of field names to functions that convert their values.
+            default_values: Map of default values for missing or null fields.
+            remove_nulls: If True, null-valued fields will be dropped.
+            raise_on_missing: If True, an error is raised when a required field is missing.
+        """
+        self.type_conversions = type_conversions or {}
+        self.default_values = default_values or {}
+        self.remove_nulls = remove_nulls
+        self.raise_on_missing = raise_on_missing
+
+    def sanitize(self, data: Any) -> Any:
+        """
+        Entry point for sanitization logic.
+
+        Args:
+            data: Input data (expected to be a dict or list of dicts).
+
+        Returns:
+            Sanitized data with transformations and corrections applied.
+        """
+        if isinstance(data, dict):
+            return self._sanitize_dict(data)
+
+        elif isinstance(data, list):
+            return [self._sanitize_dict(item) for item in data]
+
+        else:
+            return self._sanitize_value(data)
+
+    def _sanitize_dict(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Sanitize a dictionary recursively.
+
+        Args:
+            data: Dictionary to be cleaned.
+
+        Returns:
+            A sanitized version of the dictionary.
+        """
+        sanitized_data = {}
+        for key, value in data.items():
+            if value is None:
+                if self.remove_nulls:
+                    continue
+
+                elif key in self.default_values:
+                    value = self.default_values[key]
+
+                elif self.raise_on_missing:
+                    raise ValueError(f'[_sanitize_dict] Missing value for key "{key}"')
+
+            if isinstance(value, (dict, list)):
+                sanitized_data[key] = self.sanitize(value)
+
+            else:
+                sanitized_data[key] = self._sanitize_field(key, value)
+
+        for key, default in self.default_values.items():
+            if key not in sanitized_data:
+                sanitized_data[key] = default
+
+        return sanitized_data
+
+    def _sanitize_field(self, key: str, value: Any) -> Any:
+        """
+        Apply type conversion to a single field if configured.
+
+        Args:
+            key: Field name.
+            value: Original value.
+
+        Returns:
+            Sanitized and type-converted value.
+        """
+        if key in self.type_conversions:
+            try:
+                value = self.type_conversions[key](value)
+
+            except Exception as e:
+                raise ValueError(f"[_sanitized_field] Failed to convert field {key} to type {type(value)}: {e}")
+
+        return self._sanitize_value(value)
+
+    def _sanitize_value(self, value: Any) -> Any:
+        """
+        Ensure a value is JSON-serializable and safely encoded.
+
+        Args:
+            value: Raw value from input.
+
+        Returns:
+            Cleaned value, suitable for JSON serialization.
+        """
+        if isinstance(value, str):
+            return self._escape_special_characters(value)
+
+        else:
+            try:
+                json.dumps(value)
+                return value
+
+            except (TypeError, ValueError):
+                return str(value)
+
+    @staticmethod
+    def _escape_special_characters(value: Any) -> str:
+        """
+        Escape non-UTF-8 or invalid characters in string data.
+
+        Args:
+            value: A string that may contain unsafe characters.
+
+        Returns:
+            UTF-8-safe, sanitized string.
+        """
+        try:
+            value.encode("utf-8").decode("utf-8")
+            return value
+
+        except UnicodeDecodeError:
+            return value.encode("utf-8", errors="replace").decode("utf-8")
+
+    @staticmethod
+    def strip_code_fences(text: str) -> str:
+        """
+        Remove triple backticks and language hints from code blocks.
+
+        Args:
+            text: Input string potentially containing code fences.
+
+        Returns:
+            String with code fences removed.
+        """
+        return re.sub(r"^(```[a-zA-Z]*\n?)$", "", text.strip(), flags=re.MULTILINE)
+
+    def safe_load_json(self, text: str) -> Dict[str, Any]:
+        """
+        Safely parse JSON from a string, even if surrounded by extra spaces/newlines.
+
+        Args:
+            text: Input string containing JSON.
+
+        Returns:
+            Parsed JSON as a dictionary, or empty dict on failure.
+        """
+        try:
+            return json.loads(text.strip())
+
+        except json.JSONDecodeError:
+            return self.sanitize(data=text)
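
For reference, a minimal usage sketch of the JSONSanitizer added above. It assumes the class is importable from levelapp.aspects.sanitizer (the import path is inferred from the wheel's file layout); the field names and values are illustrative only.

    # Minimal sketch: exercising JSONSanitizer as defined in the hunk above.
    # Assumes the module is importable as levelapp.aspects.sanitizer.
    from levelapp.aspects.sanitizer import JSONSanitizer

    sanitizer = JSONSanitizer(
        type_conversions={"age": int},          # coerce "age" values to int
        default_values={"country": "unknown"},  # backfill a missing "country"
        remove_nulls=True,                      # drop null-valued fields
    )

    record = {"name": "Ada", "age": "36", "nickname": None}
    print(sanitizer.sanitize(record))
    # {'name': 'Ada', 'age': 36, 'country': 'unknown'}

    # safe_load_json() parses JSON text and falls back to sanitize() on failure.
    print(sanitizer.safe_load_json('{"score": 0.9}'))
    # {'score': 0.9}
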
@@ -0,0 +1,119 @@
+"""levelapp/clients/__init__.py"""
+import dotenv
+import threading
+
+from typing import Dict, Type
+
+from levelapp.clients.anthropic import AnthropicClient
+from levelapp.clients.ionos import IonosClient
+from levelapp.clients.mistral import MistralClient
+from levelapp.clients.openai import OpenAIClient
+from levelapp.core.base import BaseChatClient
+from levelapp.aspects import MonitoringAspect, logger
+
+dotenv.load_dotenv()
+
+
+class ClientRegistry:
+    """Thread-safe client registry with monitoring"""
+    _clients: Dict[str, Type[BaseChatClient]] = {}
+    _lock = threading.RLock()
+
+    @classmethod
+    def register(cls, provider: str, client_class: Type[BaseChatClient]) -> None:
+        """
+        Register a client class under a provider name.
+
+        Args:
+            provider (str): Unique identifier for the provider.
+            client_class (Type[BaseChatClient]): The client class to register.
+
+        Raises:
+            TypeError: If client_class is not a subclass of BaseChatClient.
+            KeyError: If a client for the provider is already registered.
+        """
+        if not isinstance(client_class, type) or not issubclass(client_class, BaseChatClient):
+            raise TypeError(f"Client '{provider}' must be a subclass of BaseChatClient")
+
+        if provider in cls._clients:
+            raise KeyError(f"[ClientRegistry] Client for provider '{provider}' is already registered")
+
+        with cls._lock:
+            if provider in cls._clients:
+                raise KeyError(f"[ClientRegistry] Client for provider '{provider}' is already registered")
+
+            cls._wrap_client_methods(client_class)
+            cls._clients[provider] = client_class
+            logger.info(f"[ClientRegistry] Registered client for provider: {provider}")
+
+    @classmethod
+    def _wrap_client_methods(cls, client_class: Type[BaseChatClient]) -> None:
+        """
+        Apply monitoring decorators to client methods.
+
+        Args:
+            client_class (Type[BaseChatClient]): The client class whose methods to wrap.
+
+        Raises:
+            TypeError: If the methods are not callable.
+        """
+        for method in ("call", "acall"):
+            if not hasattr(client_class, method):
+                raise TypeError(f"{client_class.__name__} missing required method: {method}")
+
+            original = getattr(client_class, method)
+
+            if getattr(original, "_is_monitored", False):
+                continue
+
+            monitored = MonitoringAspect.monitor(
+                name=f"{client_class.__name__}.{method}",
+                cached=False,
+                enable_timing=True
+            )(original)
+
+            setattr(monitored, "_is_monitored", True)
+            setattr(client_class, method, monitored)
+
+    @classmethod
+    def get(cls, provider: str, **kwargs) -> BaseChatClient:
+        """
+        Retrieve a registered chat client by provider name.
+
+        Args:
+            provider (str): The name of the provider to retrieve.
+            **kwargs: Additional keyword arguments to pass to the client constructor.
+
+        Returns:
+            BaseChatClient: An instance of the registered client class.
+        """
+        if provider not in cls._clients:
+            raise KeyError(f"Client for provider '{provider}' is not registered")
+
+        return cls._clients[provider](**kwargs)
+
+    @classmethod
+    def list_providers(cls) -> list[str]:
+        """List all registered provider names"""
+        return list(cls._clients.keys())
+
+    @classmethod
+    def unregister(cls, provider: str) -> None:
+        """Remove a provider from registry"""
+        with cls._lock:
+            cls._clients.pop(provider, None)
+
+
+clients = {
+    "openai": OpenAIClient,
+    "ionos": IonosClient,
+    "mistral": MistralClient,
+    "anthropic": AnthropicClient
+}
+
+for provider_, client_class_ in clients.items():
+    try:
+        ClientRegistry.register(provider=provider_, client_class=client_class_)
+
+    except (TypeError, KeyError) as e:
+        logger.error(f"Failed to register client for {provider_}: {e}")
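
A hedged sketch of how the registry above can be used. It assumes the package imports cleanly, that the providers registered at import time are those in the clients mapping above, and that BaseChatClient accepts the keyword arguments forwarded by get(); the API key is a placeholder and no network call is made.

    # Sketch: querying and using ClientRegistry (populated at import time).
    from levelapp.clients import ClientRegistry

    print(ClientRegistry.list_providers())
    # e.g. ['openai', 'ionos', 'mistral', 'anthropic']

    # get() forwards **kwargs to the client constructor (assumption: the base
    # class tolerates these kwargs; the key below is a dummy value).
    client = ClientRegistry.get("mistral", api_key="dummy-key-for-illustration")

    # Re-registering an existing provider raises KeyError.
    try:
        ClientRegistry.register("mistral", type(client))
    except KeyError as exc:
        print(exc)
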
@@ -0,0 +1,112 @@
+"""levelapp/clients/anthropic.py"""
+import os
+
+from typing import Dict, Any
+
+from levelapp.core.base import BaseChatClient
+
+
+class AnthropicClient(BaseChatClient):
+    """
+    Client for interacting with Anthropic's Claude API.
+
+    This implementation adapts requests and responses to the Anthropic API
+    format, including authentication, versioning headers, and structured
+    response parsing.
+
+    Attributes:
+        model (str): Target model ID (default: "claude-sonnet-4-20250514").
+        version (str): API version header required by Anthropic (default: "2023-06-01").
+        base_url (str): Base endpoint for Anthropic API (default: https://api.anthropic.com/v1).
+        api_key (str): Authentication token for Anthropic API.
+        max_tokens (int): Maximum tokens allowed in the response.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model = kwargs.get('model') or "claude-sonnet-4-20250514"
+        self.version = kwargs.get('version') or "2023-06-01"
+        self.base_url = kwargs.get("base_url") or "https://api.anthropic.com/v1"
+        self.api_key = kwargs.get('api_key') or os.environ.get('ANTHROPIC_API_KEY')
+        self.max_tokens = kwargs.get('max_tokens') or 1024
+
+        if not self.api_key:
+            raise ValueError("Anthropic API key not set.")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for message-based chat.
+
+        Returns:
+            str: "/messages"
+        """
+        return "/messages"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the Anthropic API request.
+
+        Required headers include:
+        - `x-api-key`: Authentication token.
+        - `anthropic-version`: API version string.
+        - `content-type`: Always "application/json".
+
+        Returns:
+            Dict[str, str]: Headers with authentication and API version.
+        """
+        return {
+            "x-api-key": self.api_key,
+            "anthropic-version": self.version,
+            "content-type": "application/json"
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the Anthropic Messages API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing model ID, messages, and token limit.
+        """
+        return {
+            "model": self.model,
+            "messages": [{"role": "user", "content": message}],
+            "max_tokens": self.max_tokens
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the Anthropic API response.
+
+        - Extracts text output from `content[0].text`.
+        - Attempts to JSON-parse the output if it contains structured data.
+        - Collects token usage metadata from `usage`.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from Anthropic.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        input_tokens = response.get("usage", {}).get("input_tokens", 0)
+        output_tokens = response.get("usage", {}).get("output_tokens", 0)
+        output = response.get("content", {})[0].get("text", "")
+        parsed = self.sanitizer.safe_load_json(text=output)
+        return {'output': parsed, 'metadata': {'input_tokens': input_tokens, 'output_tokens': output_tokens}}
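
To make the parsing contract concrete, the following standalone sketch shows the response shape that parse_response reads (content[0].text plus the usage token counts). The payload is hand-written for illustration, not a captured Anthropic response.

    # Illustrative Anthropic-style response, shaped as parse_response expects.
    sample_response = {
        "content": [{"type": "text", "text": '{"verdict": "pass"}'}],
        "usage": {"input_tokens": 42, "output_tokens": 7},
    }

    # Read the same fields parse_response reads; the text would then be handed
    # to sanitizer.safe_load_json() inside the client.
    output = sample_response.get("content", [{}])[0].get("text", "")
    usage = sample_response.get("usage", {})
    print(output)                                          # {"verdict": "pass"}
    print(usage["input_tokens"], usage["output_tokens"])   # 42 7
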
@@ -0,0 +1,116 @@
+"""levelapp/clients/ionos.py"""
+import os
+import uuid
+
+from typing import Dict, Any
+from levelapp.core.base import BaseChatClient
+
+
+class IonosClient(BaseChatClient):
+    """
+    Client for interacting with the IONOS LLM API.
+
+    This implementation adapts requests and responses to the IONOS
+    API format, including payload structure, headers, and response parsing.
+
+    Attributes:
+        model_id (str): Model identifier to target (from IONOS dashboard or env).
+        base_url (str): Base endpoint for IONOS API, e.g. https://api.ionos.ai.
+        api_key (str): Authentication token for the IONOS API.
+        top_k (int): Sampling parameter; number of top tokens to consider.
+        top_p (float): Sampling parameter; nucleus probability cutoff.
+        temperature (float): Sampling randomness.
+        max_tokens (int): Maximum tokens allowed in completion.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model_id = kwargs.get('model_id') or os.getenv('IONOS_MODEL_ID')
+        self.base_url = kwargs.get('base_url') or os.getenv("IONOS_BASE_URL")
+        self.api_key = kwargs.get('api_key') or os.environ.get("IONOS_API_KEY")
+        self.top_k = kwargs.get('top_k') or 5
+        self.top_p = kwargs.get('top_p') or 0.5
+        self.temperature = kwargs.get('temperature') or 0.0
+        self.max_tokens = kwargs.get('max_tokens') or 150
+
+        if not self.api_key:
+            raise ValueError("IONOS API key not set.")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for inference calls.
+
+        Example:
+            "models/{model_id}/predictions"
+        """
+        return f"models/{self.model_id}/predictions"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the IONOS API request.
+
+        Returns:
+            Dict[str, str]: Headers with authentication and content type.
+        """
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the IONOS API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing properties and sampling options.
+        """
+        return {
+            "properties": {"input": message},
+            "option": {
+                "top-k": self.top_k,
+                "top-p": self.top_p,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                "seed": uuid.uuid4().int & ((1 << 16) - 1),
+            },
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the IONOS API response.
+
+        - Extracts model output from `properties.output`.
+        - Strips any code fences or formatting noise.
+        - Attempts to JSON-parse the result (safe fallback if invalid).
+        - Collects token usage metadata.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from IONOS.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        input_tokens = response.get("metadata", {}).get("inputTokens", -1)
+        output_tokens = response.get("metadata", {}).get("outputTokens", -1)
+        output = response.get("properties", {}).get("output", "")
+        cleaned = self.sanitizer.strip_code_fences(output)
+        parsed = self.sanitizer.safe_load_json(text=cleaned)
+        return {"output": parsed, "metadata": {"input_tokens": input_tokens, "output_tokens": output_tokens}}
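
The IONOS request and response shapes implied by _build_payload and parse_response above, written out as plain dictionaries. All values are placeholders; only the key layout (properties/option on the way in, properties.output and metadata token counts on the way out) is taken from the code.

    # Request payload layout produced by IonosClient._build_payload (values illustrative).
    payload = {
        "properties": {"input": "Summarize the meeting notes."},
        "option": {
            "top-k": 5,
            "top-p": 0.5,
            "temperature": 0.0,
            "max_tokens": 150,
            "seed": 12345,  # the client derives this from uuid.uuid4()
        },
    }

    # Response layout consumed by IonosClient.parse_response (hand-written sample).
    sample_response = {
        "properties": {"output": '```json\n{"summary": "ok"}\n```'},
        "metadata": {"inputTokens": 18, "outputTokens": 9},
    }
    output = sample_response.get("properties", {}).get("output", "")
    print(output)  # the client strips the code fences, then JSON-parses the rest
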
@@ -0,0 +1,106 @@
+"""levelapp/clients/mistral.py"""
+import os
+
+from typing import Dict, Any
+from levelapp.core.base import BaseChatClient
+
+
+class MistralClient(BaseChatClient):
+    """
+    Client for interacting with the Mistral API.
+
+    This implementation adapts requests and responses to the Mistral API
+    format, handling authentication, request payload structure, and
+    response parsing into a normalized format.
+
+    Attributes:
+        model (str): Target model identifier (default: "mistral-large-latest").
+        base_url (str): Base endpoint for Mistral API (default: https://api.mistral.ai/v1).
+        api_key (str): Authentication token for Mistral API.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model = kwargs.get("model") or "mistral-large-latest"
+        self.base_url = kwargs.get('base_url') or "https://api.mistral.ai/v1"
+        self.api_key = kwargs.get('api_key') or os.environ.get('MISTRAL_API_KEY')
+
+        if not self.api_key:
+            raise ValueError("Missing API key not set.")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for chat completions.
+
+        Returns:
+            str: "/chat/completions"
+        """
+        return "/chat/completions"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the Mistral API request.
+
+        Required headers include:
+        - `Authorization`: Bearer token for API access.
+        - `Content-Type`: Always "application/json".
+        - `Accept`: Expected response format ("application/json").
+
+        Returns:
+            Dict[str, str]: Headers including authentication and content type.
+        """
+        return {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "Authorization": f"Bearer {self.api_key}"
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the Mistral chat completions API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing model ID and user message.
+        """
+        return {
+            "model": self.model,
+            "messages": [{"role": "user", "content": message}],
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the Mistral API response.
+
+        - Extracts text output from `choices[0].message.content`.
+        - Attempts to JSON-parse the output if structured data is detected.
+        - Collects token usage metadata from `usage`.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from Mistral API.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        input_tokens = response.get("usage", {}).get("prompt_tokens", 0)
+        output_tokens = response.get("usage", {}).get("completion_tokens", 0)
+        output = response.get("choices", [{}])[0].get("message", {}).get("content", "")
+        parsed = self.sanitizer.safe_load_json(text=output)
+        return {'output': parsed, 'metadata': {'input': input_tokens, 'output': output_tokens}}
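
And the equivalent sketch for the Mistral client: a hand-written response in the shape that parse_response reads (choices[0].message.content plus the usage counters). Note that, per the code above, this client reports the counters under input/output keys in its metadata, unlike the other clients, which use input_tokens/output_tokens.

    # Illustrative Mistral-style response, shaped as parse_response expects.
    sample_response = {
        "choices": [{"message": {"role": "assistant", "content": '{"label": "ok"}'}}],
        "usage": {"prompt_tokens": 21, "completion_tokens": 6},
    }

    # Read the same fields parse_response reads.
    content = sample_response.get("choices", [{}])[0].get("message", {}).get("content", "")
    usage = sample_response.get("usage", {})
    print(content)                                             # {"label": "ok"}
    print(usage["prompt_tokens"], usage["completion_tokens"])  # 21 6
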