levelapp 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- levelapp/__init__.py +0 -0
- levelapp/aspects/__init__.py +8 -0
- levelapp/aspects/loader.py +253 -0
- levelapp/aspects/logger.py +59 -0
- levelapp/aspects/monitor.py +617 -0
- levelapp/aspects/sanitizer.py +168 -0
- levelapp/clients/__init__.py +122 -0
- levelapp/clients/anthropic.py +112 -0
- levelapp/clients/gemini.py +130 -0
- levelapp/clients/groq.py +101 -0
- levelapp/clients/huggingface.py +162 -0
- levelapp/clients/ionos.py +126 -0
- levelapp/clients/mistral.py +106 -0
- levelapp/clients/openai.py +116 -0
- levelapp/comparator/__init__.py +5 -0
- levelapp/comparator/comparator.py +232 -0
- levelapp/comparator/extractor.py +108 -0
- levelapp/comparator/schemas.py +61 -0
- levelapp/comparator/scorer.py +269 -0
- levelapp/comparator/utils.py +136 -0
- levelapp/config/__init__.py +5 -0
- levelapp/config/endpoint.py +199 -0
- levelapp/config/prompts.py +57 -0
- levelapp/core/__init__.py +0 -0
- levelapp/core/base.py +386 -0
- levelapp/core/schemas.py +24 -0
- levelapp/core/session.py +336 -0
- levelapp/endpoint/__init__.py +0 -0
- levelapp/endpoint/client.py +188 -0
- levelapp/endpoint/client_test.py +41 -0
- levelapp/endpoint/manager.py +114 -0
- levelapp/endpoint/parsers.py +119 -0
- levelapp/endpoint/schemas.py +38 -0
- levelapp/endpoint/tester.py +52 -0
- levelapp/evaluator/__init__.py +3 -0
- levelapp/evaluator/evaluator.py +307 -0
- levelapp/metrics/__init__.py +63 -0
- levelapp/metrics/embedding.py +56 -0
- levelapp/metrics/embeddings/__init__.py +0 -0
- levelapp/metrics/embeddings/sentence_transformer.py +30 -0
- levelapp/metrics/embeddings/torch_based.py +56 -0
- levelapp/metrics/exact.py +182 -0
- levelapp/metrics/fuzzy.py +80 -0
- levelapp/metrics/token.py +103 -0
- levelapp/plugins/__init__.py +0 -0
- levelapp/repository/__init__.py +3 -0
- levelapp/repository/filesystem.py +203 -0
- levelapp/repository/firestore.py +291 -0
- levelapp/simulator/__init__.py +3 -0
- levelapp/simulator/schemas.py +116 -0
- levelapp/simulator/simulator.py +531 -0
- levelapp/simulator/utils.py +134 -0
- levelapp/visualization/__init__.py +7 -0
- levelapp/visualization/charts.py +358 -0
- levelapp/visualization/dashboard.py +240 -0
- levelapp/visualization/exporter.py +167 -0
- levelapp/visualization/templates/base.html +158 -0
- levelapp/visualization/templates/comparator_dashboard.html +57 -0
- levelapp/visualization/templates/simulator_dashboard.html +111 -0
- levelapp/workflow/__init__.py +6 -0
- levelapp/workflow/base.py +192 -0
- levelapp/workflow/config.py +96 -0
- levelapp/workflow/context.py +64 -0
- levelapp/workflow/factory.py +42 -0
- levelapp/workflow/registration.py +6 -0
- levelapp/workflow/runtime.py +19 -0
- levelapp-0.1.15.dist-info/METADATA +571 -0
- levelapp-0.1.15.dist-info/RECORD +70 -0
- levelapp-0.1.15.dist-info/WHEEL +4 -0
- levelapp-0.1.15.dist-info/licenses/LICENSE +0 -0
@@ -0,0 +1,162 @@
+"""levelapp/clients/huggingface.py"""
+import os
+
+from typing import List, Dict, Any
+
+from levelapp.core.base import BaseChatClient
+
+
+class HuggingFaceClient(BaseChatClient):
+    """
+    Client for interacting with HuggingFace's Chat Completions API.
+
+    This implementation adapts requests and responses to the HuggingFace Router API format,
+    which is OpenAI-compatible but served from a HuggingFace-specific endpoint.
+
+    Attributes:
+        model (str): Target model ID (default: "openai/gpt-oss-120b").
+        base_url (str): Base endpoint for HuggingFace API (default: "https://router.huggingface.co/v1").
+        api_key (str): Authentication token for the HuggingFace API.
+        max_tokens (int): Maximum tokens allowed in the completion.
+    """
+    SUPPORTED_PROVIDERS: List[str] = [
+        "cerebras",
+        "cohere",
+        "featherless-ai",
+        "fireworks-ai",
+        "groq",
+        "hf-inference",
+        "hyperbolic",
+        "nebius",
+        "novita",
+        "nscale",
+        "ovhcloud",
+        "publicai",
+        "sambanova",
+        "scaleway",
+        "together",
+        "zai-org"
+    ]
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model = kwargs.get("model") or "openai/gpt-oss-120b"
+        self.base_url = kwargs.get("base_url") or "https://router.huggingface.co/v1"
+        self.api_key = kwargs.get("api_key") or os.getenv("HF_TOKEN")
+        self.max_tokens = kwargs.get("max_tokens") or 1024
+
+        _provider = os.getenv("HUGGINGFACE_PROVIDER")
+        self.provider = kwargs.get("provider") or _provider or "auto"
+
+        self._validate_provider(self.provider)
+
+        if not self.api_key:
+            raise ValueError("HuggingFace API token not set (HF_TOKEN env var).")
+
+    def _validate_provider(self, provider):
+        """
+        Validate that the provided provider string is supported.
+
+        Args:
+            provider (str): The provider string to validate.
+
+        Raises:
+            ValueError: If the provider is neither "auto" nor in the SUPPORTED_PROVIDERS list.
+        """
+        if provider != "auto" and provider not in self.SUPPORTED_PROVIDERS:
+            supported_str = ", ".join(self.SUPPORTED_PROVIDERS)
+            raise ValueError(
+                f"[HuggingFaceClient] Unsupported HuggingFace provider '{provider}'. "
+                f"Supported providers: {supported_str}."
+            )
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for chat completions.
+
+        Returns:
+            str: "/chat/completions"
+        """
+        return "/chat/completions"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenated API endpoint URL.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the HuggingFace API request.
+
+        Returns:
+            Dict[str, str]: HTTP headers with authentication and content type.
+        """
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json"
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the HuggingFace Chat Completions API.
+
+        Args:
+            message (str): Message to send.
+
+        Returns:
+            Dict[str, Any]: JSON payload containing model ID and messages.
+        """
+        model_with_provider = self.model
+        if self.provider and self.provider != "auto":
+            model_with_provider = f"{self.model}:{self.provider}"
+
+        payload = {
+            "model": model_with_provider,
+            "messages": [{"role": "user", "content": message}]
+        }
+
+        if self.max_tokens:
+            payload["max_tokens"] = self.max_tokens
+
+        return payload
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the HuggingFace API response.
+
+        - Extracts text output from 'choices[0].message.content'.
+        - Attempts to JSON-parse the result if it contains structured content.
+        - Collects token usage metadata from 'usage'.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from HuggingFace.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int,
+                }
+            }
+        """
+        usage = response.get("usage", {})
+        input_tokens = usage.get("prompt_tokens", 0)
+        output_tokens = usage.get("completion_tokens", 0)
+
+        choices = response.get("choices", [])
+        if choices:
+            message = choices[0].get("message", {})
+            output = message.get("content", "")
+        else:
+            output = ""
+
+        parsed = self.sanitizer.safe_load_json(text=output)
+
+        return {"output": parsed, "metadata": {"input_tokens": input_tokens, "output_tokens": output_tokens}}
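
Note: a minimal sketch of how the provider routing above plays out, exercising only the builder methods shown in this diff. It makes no network call and assumes BaseChatClient.__init__ tolerates these keyword arguments; the token value is a placeholder:

    from levelapp.clients.huggingface import HuggingFaceClient

    # Pinning a provider appends it to the model ID as "<model>:<provider>".
    client = HuggingFaceClient(api_key="hf_xxx", provider="groq")

    client._build_endpoint()
    # -> "https://router.huggingface.co/v1/chat/completions"

    client._build_payload("Hello!")
    # -> {"model": "openai/gpt-oss-120b:groq",
    #     "messages": [{"role": "user", "content": "Hello!"}],
    #     "max_tokens": 1024}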
@@ -0,0 +1,126 @@
+"""levelapp/clients/ionos.py"""
+import os
+
+from typing import Dict, Any
+from levelapp.core.base import BaseChatClient
+
+
+class IonosClient(BaseChatClient):
+    """
+    Client for interacting with the IONOS LLM API.
+
+    This implementation adapts requests and responses to the IONOS
+    API format, including payload structure, headers, and response parsing.
+
+    Attributes:
+        model_id (str): Model identifier to target (from IONOS dashboard or env).
+        base_url (str): Base endpoint for IONOS API, e.g. https://api.ionos.ai.
+        api_key (str): Authentication token for the IONOS API.
+        top_k (int): Sampling parameter; number of top tokens to consider.
+        top_p (float): Sampling parameter; nucleus probability cutoff.
+        temperature (float): Sampling randomness.
+        max_tokens (int): Maximum tokens allowed in completion.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model_id = kwargs.get('model_id') or os.getenv('IONOS_MODEL_ID')
+        self.base_url = kwargs.get('base_url') or os.getenv("IONOS_BASE_URL")
+        self.api_key = kwargs.get('api_key') or os.environ.get("IONOS_API_KEY")
+        self.top_k = kwargs.get('top_k') or 5
+        self.top_p = kwargs.get('top_p') or 0.5
+        self.temperature = kwargs.get('temperature') or 0.0
+        self.max_tokens = kwargs.get('max_tokens') or 150
+
+        if not self.api_key:
+            raise ValueError("IONOS API key not set.")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for chat completions.
+
+        Returns:
+            str: "v1/chat/completions"
+        """
+        return "v1/chat/completions"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the IONOS API request.
+
+        Returns:
+            Dict[str, str]: Headers with authentication and content type.
+        """
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the IONOS API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing the user message and sampling options.
+        """
+        return {
+            "model": self.model_id,
+            "messages": [
+                {"role": "user", "content": message}
+            ],
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "max_completion_tokens": self.max_tokens
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the IONOS API response.
+
+        - Extracts model output from `choices[0].message.content`.
+        - Strips any code fences or formatting noise.
+        - Attempts to JSON-parse the result (safe fallback if invalid).
+        - Collects token usage metadata.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from IONOS.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        message = response["choices"][0]["message"]["content"]
+
+        cleaned = self.sanitizer.strip_code_fences(message)
+        parsed = self.sanitizer.safe_load_json(text=cleaned)
+
+        if parsed is None:
+            parsed = cleaned
+
+        usage = response.get("usage", {})
+
+        return {
+            "output": parsed,
+            "metadata": {
+                "input_tokens": usage.get("prompt_tokens", -1),
+                "output_tokens": usage.get("completion_tokens", -1)
+            }
+        }
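
Note: IonosClient falls back to environment variables for every connection setting, so a configuration sketch might look like the following. The model ID and token are placeholders; the base URL reuses the example value from the class docstring, and constructing the client with no kwargs assumes BaseChatClient tolerates that:

    import os

    os.environ["IONOS_MODEL_ID"] = "<model-id-from-ionos-dashboard>"  # placeholder
    os.environ["IONOS_BASE_URL"] = "https://api.ionos.ai"  # example value from the docstring
    os.environ["IONOS_API_KEY"] = "<token>"  # placeholder

    from levelapp.clients.ionos import IonosClient

    client = IonosClient()
    client._build_endpoint()
    # -> "https://api.ionos.ai/v1/chat/completions"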
@@ -0,0 +1,106 @@
+"""levelapp/clients/mistral.py"""
+import os
+
+from typing import Dict, Any
+from levelapp.core.base import BaseChatClient
+
+
+class MistralClient(BaseChatClient):
+    """
+    Client for interacting with the Mistral API.
+
+    This implementation adapts requests and responses to the Mistral API
+    format, handling authentication, request payload structure, and
+    response parsing into a normalized format.
+
+    Attributes:
+        model (str): Target model identifier (default: "mistral-large-latest").
+        base_url (str): Base endpoint for Mistral API (default: https://api.mistral.ai/v1).
+        api_key (str): Authentication token for Mistral API.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model = kwargs.get("model") or "mistral-large-latest"
+        self.base_url = kwargs.get('base_url') or "https://api.mistral.ai/v1"
+        self.api_key = kwargs.get('api_key') or os.environ.get('MISTRAL_API_KEY')
+
+        if not self.api_key:
+            raise ValueError("Mistral API key not set (MISTRAL_API_KEY env var).")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for chat completions.
+
+        Returns:
+            str: "/chat/completions"
+        """
+        return "/chat/completions"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the Mistral API request.
+
+        Required headers include:
+        - `Authorization`: Bearer token for API access.
+        - `Content-Type`: Always "application/json".
+        - `Accept`: Expected response format ("application/json").
+
+        Returns:
+            Dict[str, str]: Headers including authentication and content type.
+        """
+        return {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "Authorization": f"Bearer {self.api_key}"
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the Mistral chat completions API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing model ID and user message.
+        """
+        return {
+            "model": self.model,
+            "messages": [{"role": "user", "content": message}],
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the Mistral API response.
+
+        - Extracts text output from `choices[0].message.content`.
+        - Attempts to JSON-parse the output if structured data is detected.
+        - Collects token usage metadata from `usage`.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from Mistral API.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        input_tokens = response.get("usage", {}).get("prompt_tokens", 0)
+        output_tokens = response.get("usage", {}).get("completion_tokens", 0)
+        output = response.get("choices", [{}])[0].get("message", {}).get("content", "")
+        parsed = self.sanitizer.safe_load_json(text=output)
+        return {"output": parsed, "metadata": {"input_tokens": input_tokens, "output_tokens": output_tokens}}
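
Note: for reference, a sketch of the normalization parse_response performs, shown on a fabricated minimal Chat Completions payload; it assumes the sanitizer's safe_load_json passes non-JSON text through unchanged:

    raw = {
        "choices": [{"message": {"role": "assistant", "content": "Paris."}}],
        "usage": {"prompt_tokens": 12, "completion_tokens": 3},
    }

    # MistralClient.parse_response(raw) would then return:
    # {"output": "Paris.",
    #  "metadata": {"input_tokens": 12, "output_tokens": 3}}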
@@ -0,0 +1,116 @@
+"""levelapp/clients/openai.py"""
+import os
+
+from typing import Dict, Any
+from levelapp.core.base import BaseChatClient
+
+
+class OpenAIClient(BaseChatClient):
+    """
+    Client for interacting with OpenAI's Responses API.
+
+    This implementation adapts requests and responses to the OpenAI API
+    format, including request structure, headers, and token usage reporting.
+
+    Attributes:
+        model (str): Target model ID (from the `model` kwarg or the OPENAI_MODEL env var).
+        base_url (str): Base endpoint for OpenAI API (default: https://api.openai.com/v1).
+        api_key (str): Authentication token for the OpenAI API.
+        max_tokens (int): Maximum tokens allowed in the completion.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.model = kwargs.get('model') or os.environ.get("OPENAI_MODEL")
+        self.base_url = kwargs.get('base_url') or "https://api.openai.com/v1"
+        self.api_key = kwargs.get('api_key') or os.environ.get('OPENAI_API_KEY')
+        self.max_tokens = kwargs.get('max_tokens') or 1024
+
+        if not self.api_key:
+            raise ValueError("OpenAI API key not set.")
+
+    @property
+    def endpoint_path(self) -> str:
+        """
+        API-specific endpoint path for the Responses API.
+
+        Returns:
+            str: "/responses"
+        """
+        return "/responses"
+
+    def _build_endpoint(self) -> str:
+        """
+        Construct the full API endpoint URL.
+
+        Returns:
+            str: Concatenation of base_url and endpoint_path.
+        """
+        return f"{self.base_url}/{self.endpoint_path.lstrip('/')}"
+
+    def _build_headers(self) -> Dict[str, str]:
+        """
+        Build HTTP headers for the OpenAI API request.
+
+        Returns:
+            Dict[str, str]: Headers with authentication and content type.
+        """
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+    def _build_payload(self, message: str) -> Dict[str, Any]:
+        """
+        Construct the JSON payload for the OpenAI Responses API.
+
+        Args:
+            message (str): User input or prompt to evaluate.
+
+        Returns:
+            Dict[str, Any]: Payload containing model ID, input, and token limit.
+        """
+        return {
+            "model": self.model,
+            "input": message,
+            "max_output_tokens": self.max_tokens,
+        }
+
+    def parse_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Parse and normalize the OpenAI API response.
+
+        - Concatenates text from `output[*].content[*]` blocks of type "output_text".
+        - Attempts to JSON-parse the result if it contains structured content.
+        - Collects token usage metadata from `usage`.
+
+        Args:
+            response (Dict[str, Any]): Raw JSON response from OpenAI.
+
+        Returns:
+            Dict[str, Any]: {
+                "output": Parsed model output (dict or str),
+                "metadata": {
+                    "input_tokens": int,
+                    "output_tokens": int
+                }
+            }
+        """
+        usage = response.get("usage", {})
+        input_tokens = usage.get("input_tokens", 0)
+        output_tokens = usage.get("output_tokens", 0)
+
+        output_text = ""
+        for item in response.get("output", []):
+            for block in item.get("content", []):
+                if block.get("type") == "output_text":
+                    output_text += block.get("text", "")
+
+        parsed = self.sanitizer.safe_load_json(text=output_text)
+
+        return {
+            "output": parsed,
+            "metadata": {
+                "input_tokens": input_tokens,
+                "output_tokens": output_tokens,
+            },
+        }
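
Note: unlike the Chat Completions-style clients above, the Responses API nests text under typed content blocks; a fabricated minimal payload illustrates the shape parse_response walks:

    raw = {
        "output": [
            {"type": "message",
             "content": [
                 {"type": "output_text", "text": "Hello "},
                 {"type": "output_text", "text": "world."},
             ]}
        ],
        "usage": {"input_tokens": 8, "output_tokens": 4},
    }

    # The nested loop concatenates every "output_text" block, so
    # output_text == "Hello world." and OpenAIClient.parse_response(raw) yields
    # {"output": <sanitizer result for "Hello world.">,
    #  "metadata": {"input_tokens": 8, "output_tokens": 4}}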