levelapp 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- levelapp/__init__.py +0 -0
- levelapp/aspects/__init__.py +8 -0
- levelapp/aspects/loader.py +253 -0
- levelapp/aspects/logger.py +59 -0
- levelapp/aspects/monitor.py +617 -0
- levelapp/aspects/sanitizer.py +168 -0
- levelapp/clients/__init__.py +122 -0
- levelapp/clients/anthropic.py +112 -0
- levelapp/clients/gemini.py +130 -0
- levelapp/clients/groq.py +101 -0
- levelapp/clients/huggingface.py +162 -0
- levelapp/clients/ionos.py +126 -0
- levelapp/clients/mistral.py +106 -0
- levelapp/clients/openai.py +116 -0
- levelapp/comparator/__init__.py +5 -0
- levelapp/comparator/comparator.py +232 -0
- levelapp/comparator/extractor.py +108 -0
- levelapp/comparator/schemas.py +61 -0
- levelapp/comparator/scorer.py +269 -0
- levelapp/comparator/utils.py +136 -0
- levelapp/config/__init__.py +5 -0
- levelapp/config/endpoint.py +199 -0
- levelapp/config/prompts.py +57 -0
- levelapp/core/__init__.py +0 -0
- levelapp/core/base.py +386 -0
- levelapp/core/schemas.py +24 -0
- levelapp/core/session.py +336 -0
- levelapp/endpoint/__init__.py +0 -0
- levelapp/endpoint/client.py +188 -0
- levelapp/endpoint/client_test.py +41 -0
- levelapp/endpoint/manager.py +114 -0
- levelapp/endpoint/parsers.py +119 -0
- levelapp/endpoint/schemas.py +38 -0
- levelapp/endpoint/tester.py +52 -0
- levelapp/evaluator/__init__.py +3 -0
- levelapp/evaluator/evaluator.py +307 -0
- levelapp/metrics/__init__.py +63 -0
- levelapp/metrics/embedding.py +56 -0
- levelapp/metrics/embeddings/__init__.py +0 -0
- levelapp/metrics/embeddings/sentence_transformer.py +30 -0
- levelapp/metrics/embeddings/torch_based.py +56 -0
- levelapp/metrics/exact.py +182 -0
- levelapp/metrics/fuzzy.py +80 -0
- levelapp/metrics/token.py +103 -0
- levelapp/plugins/__init__.py +0 -0
- levelapp/repository/__init__.py +3 -0
- levelapp/repository/filesystem.py +203 -0
- levelapp/repository/firestore.py +291 -0
- levelapp/simulator/__init__.py +3 -0
- levelapp/simulator/schemas.py +116 -0
- levelapp/simulator/simulator.py +531 -0
- levelapp/simulator/utils.py +134 -0
- levelapp/visualization/__init__.py +7 -0
- levelapp/visualization/charts.py +358 -0
- levelapp/visualization/dashboard.py +240 -0
- levelapp/visualization/exporter.py +167 -0
- levelapp/visualization/templates/base.html +158 -0
- levelapp/visualization/templates/comparator_dashboard.html +57 -0
- levelapp/visualization/templates/simulator_dashboard.html +111 -0
- levelapp/workflow/__init__.py +6 -0
- levelapp/workflow/base.py +192 -0
- levelapp/workflow/config.py +96 -0
- levelapp/workflow/context.py +64 -0
- levelapp/workflow/factory.py +42 -0
- levelapp/workflow/registration.py +6 -0
- levelapp/workflow/runtime.py +19 -0
- levelapp-0.1.15.dist-info/METADATA +571 -0
- levelapp-0.1.15.dist-info/RECORD +70 -0
- levelapp-0.1.15.dist-info/WHEEL +4 -0
- levelapp-0.1.15.dist-info/licenses/LICENSE +0 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""'levelapp/aspects/sanitizers.py'"""
|
|
2
|
+
import re
|
|
3
|
+
import json
|
|
4
|
+
from typing import Dict, Any, Callable
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class JSONSanitizer:
    """Clean arbitrary data into JSON-safe structures.

    Supports per-field type conversions, default values for missing or null
    fields, optional removal of null fields, and tolerant parsing of JSON
    text that may be wrapped in Markdown code fences (common in LLM output).
    """

    def __init__(
        self,
        type_conversions: Dict[str, Callable[[str], Any]] = None,
        default_values: Dict[str, Any] = None,
        remove_nulls: bool = True,
        raise_on_missing: bool = False,
    ):
        """
        Initialize the sanitizer with optional transformation rules.

        Args:
            type_conversions: Map of field names to functions that convert their values.
            default_values: Map of default values for missing or null fields.
            remove_nulls: If True, null-valued fields will be dropped.
            raise_on_missing: If True, an error is raised when a required field is missing.
        """
        # `or {}` guards against the mutable-default pitfall while keeping
        # the public signature unchanged.
        self.type_conversions = type_conversions or {}
        self.default_values = default_values or {}
        self.remove_nulls = remove_nulls
        self.raise_on_missing = raise_on_missing

    def sanitize(self, data: Any) -> Any:
        """
        Entry point for sanitization logic.

        Args:
            data: Input data (dict, list, or scalar).

        Returns:
            Sanitized data with transformations and corrections applied.
        """
        if isinstance(data, dict):
            return self._sanitize_dict(data)

        elif isinstance(data, list):
            # Recurse through `sanitize` (not `_sanitize_dict`) so lists of
            # scalars or nested lists are handled instead of crashing on
            # a `.items()` call against a non-dict item.
            return [self.sanitize(item) for item in data]

        else:
            return self._sanitize_value(data)

    def _sanitize_dict(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Sanitize a dictionary recursively.

        Args:
            data: Dictionary to be cleaned.

        Returns:
            A sanitized version of the dictionary.

        Raises:
            ValueError: If a value is null, `remove_nulls` is False, no
                default is configured, and `raise_on_missing` is True.
        """
        sanitized_data = {}
        for key, value in data.items():
            if value is None:
                if self.remove_nulls:
                    # Dropped here; the backfill loop below re-adds the key
                    # if a default is configured for it.
                    continue

                elif key in self.default_values:
                    value = self.default_values[key]

                elif self.raise_on_missing:
                    raise ValueError(f'[_sanitize_dict] Missing value for key "{key}"')

            if isinstance(value, (dict, list)):
                sanitized_data[key] = self.sanitize(value)

            else:
                sanitized_data[key] = self._sanitize_field(key, value)

        # Backfill defaults for keys absent from the input (or dropped above).
        for key, default in self.default_values.items():
            if key not in sanitized_data:
                sanitized_data[key] = default

        return sanitized_data

    def _sanitize_field(self, key: str, value: Any) -> Any:
        """
        Apply type conversion to a single field if configured.

        Args:
            key: Field name.
            value: Original value.

        Returns:
            Sanitized and type-converted value.

        Raises:
            ValueError: If the configured converter fails for this field.
        """
        if key in self.type_conversions:
            converter = self.type_conversions[key]
            try:
                value = converter(value)

            except Exception as e:
                # Report the converter that failed; the original message
                # reported `type(value)` (the pre-conversion type), which
                # was misleading.
                raise ValueError(
                    f"[_sanitize_field] Failed to convert field '{key}' using {converter!r}: {e}"
                )

        return self._sanitize_value(value)

    def _sanitize_value(self, value: Any) -> Any:
        """
        Ensure a value is JSON-serializable and safely encoded.

        Args:
            value: Raw value from input.

        Returns:
            Cleaned value, suitable for JSON serialization. Values that
            cannot be JSON-encoded are stringified.
        """
        if isinstance(value, str):
            return self._escape_special_characters(value)

        else:
            try:
                json.dumps(value)
                return value

            except (TypeError, ValueError):
                return str(value)

    @staticmethod
    def _escape_special_characters(value: Any) -> str:
        """
        Replace characters that cannot round-trip through UTF-8.

        Args:
            value: A string that may contain unsafe characters.

        Returns:
            UTF-8-safe, sanitized string.
        """
        try:
            # `str.encode` raises UnicodeEncodeError (e.g. for lone
            # surrogates); the original caught only UnicodeDecodeError, so
            # the fallback below was unreachable. UnicodeError covers both.
            value.encode("utf-8").decode("utf-8")
            return value

        except UnicodeError:
            return value.encode("utf-8", errors="replace").decode("utf-8")

    @staticmethod
    def strip_code_fences(text: str) -> str:
        """
        Remove triple backticks and language hints from code blocks.

        Args:
            text: Input string potentially containing code fences.

        Returns:
            String with code fences removed.
        """
        return re.sub(r"^(```[a-zA-Z]*\n?)$", "", text.strip(), flags=re.MULTILINE)

    def safe_load_json(self, text: str) -> Any:
        """
        Safely parse JSON from a string, tolerating surrounding whitespace
        and Markdown code fences (e.g. ```json ... ``` wrappers emitted
        by LLMs).

        Args:
            text: Input string containing JSON.

        Returns:
            The parsed JSON value on success; otherwise the sanitized
            input string (NOT an empty dict), so callers still receive
            usable output.
        """
        try:
            return json.loads(text.strip())

        except json.JSONDecodeError:
            pass

        # Retry after removing code fences; previously `strip_code_fences`
        # existed but was never called, so fenced JSON always fell through
        # to the string fallback.
        try:
            return json.loads(self.strip_code_fences(text))

        except json.JSONDecodeError:
            return self.sanitize(data=text)
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
"""levelapp/clients/__init__.py"""
|
|
2
|
+
import dotenv
|
|
3
|
+
import threading
|
|
4
|
+
|
|
5
|
+
from typing import Dict, Type
|
|
6
|
+
|
|
7
|
+
from levelapp.clients.anthropic import AnthropicClient
|
|
8
|
+
from levelapp.clients.gemini import GeminiClient
|
|
9
|
+
from levelapp.clients.groq import GroqClient
|
|
10
|
+
from levelapp.clients.ionos import IonosClient
|
|
11
|
+
from levelapp.clients.mistral import MistralClient
|
|
12
|
+
from levelapp.clients.openai import OpenAIClient
|
|
13
|
+
from levelapp.core.base import BaseChatClient
|
|
14
|
+
from levelapp.aspects import MonitoringAspect, logger
|
|
15
|
+
|
|
16
|
+
dotenv.load_dotenv()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ClientRegistry:
    """Thread-safe client registry with monitoring"""
    # Mapping of provider name -> registered client class.
    _clients: Dict[str, Type[BaseChatClient]] = {}
    # Reentrant lock guarding all reads/writes of `_clients`.
    _lock = threading.RLock()

    @classmethod
    def register(cls, provider: str, client_class: Type[BaseChatClient]) -> None:
        """
        Register a client class under a provider name.

        Args:
            provider (str): Unique identifier for the provider.
            client_class (Type[BaseChatClient]): The client class to register.

        Raises:
            TypeError: If client_class is not a subclass of BaseChatClient.
            KeyError: If a client for the provider is already registered.
        """
        if not isinstance(client_class, type) or not issubclass(client_class, BaseChatClient):
            raise TypeError(f"Client '{provider}' must be a subclass of BaseChatClient")

        # Check-and-insert in one critical section. The original also
        # checked for duplicates BEFORE acquiring the lock, which was a
        # redundant check-then-act; a single check under the lock suffices.
        with cls._lock:
            if provider in cls._clients:
                raise KeyError(f"[ClientRegistry] Client for provider '{provider}' is already registered")

            cls._wrap_client_methods(client_class)
            cls._clients[provider] = client_class

    @classmethod
    def _wrap_client_methods(cls, client_class: Type[BaseChatClient]) -> None:
        """
        Apply monitoring decorators to client methods.

        Args:
            client_class (Type[BaseChatClient]): The client class whose methods to wrap.

        Raises:
            TypeError: If a required method ("call"/"acall") is missing.
        """
        for method in ("call", "acall"):
            if not hasattr(client_class, method):
                raise TypeError(f"{client_class.__name__} missing required method: {method}")

            original = getattr(client_class, method)

            # Idempotence guard: never double-wrap an already-monitored method
            # (e.g. inherited from an already-registered base class).
            if getattr(original, "_is_monitored", False):
                continue

            monitored = MonitoringAspect.monitor(
                name=f"{client_class.__name__}.{method}",
                cached=False,
                enable_timing=True
            )(original)

            setattr(monitored, "_is_monitored", True)
            setattr(client_class, method, monitored)

    @classmethod
    def get(cls, provider: str, **kwargs) -> BaseChatClient:
        """
        Retrieve a registered chat client by provider name.

        Args:
            provider (str): The name of the provider to retrieve.
            **kwargs: Additional keyword arguments to pass to the client constructor.

        Returns:
            BaseChatClient: An instance of the registered client class.

        Raises:
            KeyError: If no client is registered for the provider.
        """
        # Look up under the lock; instantiate OUTSIDE it so a slow or
        # re-entrant constructor cannot stall other registry users.
        with cls._lock:
            if provider not in cls._clients:
                raise KeyError(f"Client for provider '{provider}' is not registered")
            client_cls = cls._clients[provider]

        return client_cls(**kwargs)

    @classmethod
    def list_providers(cls) -> list[str]:
        """List all registered provider names"""
        with cls._lock:
            return list(cls._clients.keys())

    @classmethod
    def unregister(cls, provider: str) -> None:
        """Remove a provider from registry"""
        with cls._lock:
            cls._clients.pop(provider, None)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# Built-in provider-name -> client-class mapping shipped with the package.
clients = {
    "openai": OpenAIClient,
    "ionos": IonosClient,
    "mistral": MistralClient,
    "anthropic": AnthropicClient,
    "groq": GroqClient,
    "gemini": GeminiClient
}

# Register every bundled client at import time. A failure for one provider
# is logged and must not prevent the remaining providers from registering.
for provider_, client_class_ in clients.items():
    try:
        ClientRegistry.register(provider=provider_, client_class=client_class_)

    except (TypeError, KeyError) as e:
        logger.error(f"Failed to register client for {provider_}: {e}")
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
"""levelapp/clients/anthropic.py"""
|
|
2
|
+
import os
|
|
3
|
+
|
|
4
|
+
from typing import Dict, Any
|
|
5
|
+
|
|
6
|
+
from levelapp.core.base import BaseChatClient
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AnthropicClient(BaseChatClient):
    """
    Client for interacting with Anthropic's Claude API.

    This implementation adapts requests and responses to the Anthropic API
    format, including authentication, versioning headers, and structured
    response parsing.

    Attributes:
        model (str): Target model ID (default: "claude-sonnet-4-20250514").
        version (str): API version header required by Anthropic (default: "2023-06-01").
        base_url (str): Base endpoint for Anthropic API (default: https://api.anthropic.com/v1).
        api_key (str): Authentication token for Anthropic API.
        max_tokens (int): Maximum tokens allowed in the response.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = kwargs.get('model') or "claude-sonnet-4-20250514"
        self.version = kwargs.get('version') or "2023-06-01"
        self.base_url = kwargs.get("base_url") or "https://api.anthropic.com/v1"
        self.api_key = kwargs.get('api_key') or os.environ.get('ANTHROPIC_API_KEY')
        self.max_tokens = kwargs.get('max_tokens') or 1024

        if not self.api_key:
            raise ValueError("Anthropic API key not set.")

    @property
    def endpoint_path(self) -> str:
        """
        API-specific endpoint path for message-based chat.

        Returns:
            str: "/messages"
        """
        return "/messages"

    def _build_endpoint(self) -> str:
        """
        Construct the full API endpoint URL.

        Returns:
            str: Concatenation of base_url and endpoint_path.
        """
        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"

    def _build_headers(self) -> Dict[str, str]:
        """
        Build HTTP headers for the Anthropic API request.

        Required headers include:
        - `x-api-key`: Authentication token.
        - `anthropic-version`: API version string.
        - `content-type`: Always "application/json".

        Returns:
            Dict[str, str]: Headers with authentication and API version.
        """
        return {
            "x-api-key": self.api_key,
            "anthropic-version": self.version,
            "content-type": "application/json"
        }

    def _build_payload(self, message: str) -> Dict[str, Any]:
        """
        Construct the JSON payload for the Anthropic Messages API.

        Args:
            message (str): User input or prompt to evaluate.

        Returns:
            Dict[str, Any]: Payload containing model ID, messages, and token limit.
        """
        return {
            "model": self.model,
            "messages": [{"role": "user", "content": message}],
            "max_tokens": self.max_tokens
        }

    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """
        Parse and normalize the Anthropic API response.

        - Extracts text output from `content[0].text`.
        - Attempts to JSON-parse the output if it contains structured data.
        - Collects token usage metadata from `usage`.

        Args:
            response (Dict[str, Any]): Raw JSON response from Anthropic.

        Returns:
            Dict[str, Any]: {
                "output": Parsed model output (dict or str),
                "metadata": {
                    "input_tokens": int,
                    "output_tokens": int
                }
            }
        """
        usage = response.get("usage", {})
        input_tokens = usage.get("input_tokens", 0)
        output_tokens = usage.get("output_tokens", 0)
        # Guard against a missing or empty "content" list: the original
        # used `response.get("content", {})[0]`, which raised KeyError on
        # a missing key (`{}[0]`) and IndexError on an empty list. Same
        # defensive pattern as the Groq client's `choices` handling.
        content = response.get("content") or [{}]
        output = content[0].get("text", "")
        parsed = self.sanitizer.safe_load_json(text=output)
        return {'output': parsed, 'metadata': {'input_tokens': input_tokens, 'output_tokens': output_tokens}}
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
"""levelapp/clients/gemini.py"""
|
|
2
|
+
import os
|
|
3
|
+
from typing import Dict, Any
|
|
4
|
+
|
|
5
|
+
from levelapp.core.base import BaseChatClient
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class GeminiClient(BaseChatClient):
    """
    Chat client targeting Google's Gemini `generateContent` API.

    Adapts requests/responses to the Gemini wire format: the nested
    `contents -> parts -> text` payload shape, the `x-goog-api-key`
    header, and `usageMetadata`-based token accounting.

    Attributes:
        model (str): Target model ID (from kwargs or the GEMINI_MODEL env var).
        base_url (str): Gemini API root (default: https://generativelanguage.googleapis.com/v1beta).
        api_key (str): Gemini API key (from kwargs or the GEMINI_API_KEY env var).
        max_tokens (int): Cap on generated tokens (default: 1024).
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # kwargs take precedence; environment variables are the fallback.
        self.model = kwargs.get('model') or os.environ.get("GEMINI_MODEL")
        self.base_url = kwargs.get('base_url') or "https://generativelanguage.googleapis.com/v1beta"
        self.api_key = kwargs.get('api_key') or os.environ.get('GEMINI_API_KEY')
        self.max_tokens = kwargs.get('max_tokens') or 1024

        if not self.api_key:
            raise ValueError("Gemini API key not set")

    @property
    def endpoint_path(self) -> str:
        """
        Model-specific path of the content-generation endpoint.

        Returns:
            str: "/models/<model>:generateContent".
        """
        return f"/models/{self.model}:generateContent"

    def _build_headers(self) -> Dict[str, str]:
        """
        Assemble request headers.

        Gemini authenticates via the `x-goog-api-key` header rather than
        an `Authorization: Bearer` token.

        Returns:
            Dict[str, str]: API-key and content-type headers.
        """
        return {
            "x-goog-api-key": self.api_key,
            "Content-Type": "application/json",
        }

    def _build_payload(self, message: str) -> Dict[str, Any]:
        """
        Build the `generateContent` request body.

        Args:
            message (str): The user prompt to send.

        Returns:
            Dict[str, Any]: Nested `contents`/`parts` structure plus the
            generation config carrying the output-token cap.
        """
        user_part = {"text": message}
        return {
            "contents": [{"parts": [user_part]}],
            "generationConfig": {
                "maxOutputTokens": self.max_tokens,
            }
        }

    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """
        Normalize a raw Gemini response.

        Pulls the generated text from `candidates[0].content.parts[0].text`,
        attempts to interpret it as JSON, and gathers token counts and the
        finish reason from `usageMetadata` / the first candidate.

        Args:
            response (Dict[str, Any]): Raw JSON body returned by Gemini.

        Returns:
            Dict[str, Any]: {
                "output": Parsed model output (dict or str),
                "metadata": {
                    "input_tokens": int,
                    "output_tokens": int,
                    "total_tokens": int,
                    "finish_reason": str
                }
            }
        """
        # First candidate (defensively defaulting every level).
        candidates = response.get("candidates", [{}])
        candidate = candidates[0] if candidates else {}

        # First text part of that candidate.
        parts = candidate.get("content", {}).get("parts", [{}])
        first_part = parts[0] if parts else {}
        output_text = first_part.get("text", "")

        # Token accounting and termination cause.
        usage = response.get("usageMetadata", {})
        metadata = {
            "input_tokens": usage.get("promptTokenCount", 0),
            "output_tokens": usage.get("candidatesTokenCount", 0),
            "total_tokens": usage.get("totalTokenCount", 0),
            "finish_reason": candidate.get("finishReason", "UNKNOWN")
        }

        # JSON-parse the text when it is structured; fall back to string.
        return {
            "output": self.sanitizer.safe_load_json(text=output_text),
            "metadata": metadata
        }
|
levelapp/clients/groq.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""levelapp/clients/groq.py"""
|
|
2
|
+
import os
|
|
3
|
+
from typing import Dict, Any
|
|
4
|
+
from levelapp.core.base import BaseChatClient
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class GroqClient(BaseChatClient):
    """
    Client for interacting with Groq's Chat Completions API.

    This implementation adapts requests and responses to the Groq API
    format, which is OpenAI-compatible but with Groq-specific models and endpoints.

    Attributes:
        model (str): Target model ID (default: "llama-3.3-70b-versatile").
        base_url (str): Base endpoint for Groq API (default: https://api.groq.com/openai/v1).
        api_key (str): Authentication token for the Groq API.
        max_tokens (int): Maximum tokens allowed in the completion.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read the correctly-spelled GROQ_MODEL first; keep the original
        # misspelled GROK_MODEL as a legacy fallback for backward
        # compatibility, then apply the documented default model (the
        # original never applied any default, leaving model == None).
        self.model = (
            kwargs.get('model')
            or os.environ.get('GROQ_MODEL')
            or os.environ.get('GROK_MODEL')
            or "llama-3.3-70b-versatile"
        )
        self.base_url = kwargs.get('base_url') or "https://api.groq.com/openai/v1"
        self.api_key = kwargs.get('api_key') or os.environ.get('GROQ_API_KEY')
        self.max_tokens = kwargs.get('max_tokens') or 1024

        if not self.api_key:
            raise ValueError("Groq API key not set")

    @property
    def endpoint_path(self) -> str:
        """
        API-specific endpoint path for chat completions.

        Returns:
            str: "/chat/completions"
        """
        return "/chat/completions"

    def _build_endpoint(self) -> str:
        """
        Construct the full API endpoint URL.

        Returns:
            str: Concatenation of base_url and endpoint_path.
        """
        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"

    def _build_headers(self) -> Dict[str, str]:
        """
        Build HTTP headers for the Groq API request.

        Returns:
            Dict[str, str]: Headers with authentication and content type.
        """
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    def _build_payload(self, message: str) -> Dict[str, Any]:
        """
        Construct the JSON payload for the Groq Chat Completions API.

        Args:
            message (str): User input or prompt to evaluate.

        Returns:
            Dict[str, Any]: Payload containing model ID, messages, and token limit.
        """
        return {
            "model": self.model,
            "messages": [{"role": "user", "content": message}],
            "max_tokens": self.max_tokens,
        }

    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """
        Parse and normalize the Groq API response.

        - Extracts text output from `choices[0].message.content`.
        - Attempts to JSON-parse the result if it contains structured content.
        - Collects token usage metadata from `usage`.

        Args:
            response (Dict[str, Any]): Raw JSON response from Groq.

        Returns:
            Dict[str, Any]: {
                "output": Parsed model output (dict or str),
                "metadata": {
                    "input_tokens": int,
                    "output_tokens": int
                }
            }
        """
        usage = response.get("usage", {})
        input_tokens = usage.get("prompt_tokens", 0)
        output_tokens = usage.get("completion_tokens", 0)
        output = response.get("choices", [{}])[0].get("message", {}).get("content", "")
        parsed = self.sanitizer.safe_load_json(text=output)
        return {"output": parsed, "metadata": {"input_tokens": input_tokens, "output_tokens": output_tokens}}
|