auditi 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auditi/__init__.py +47 -0
- auditi/client.py +76 -0
- auditi/context.py +71 -0
- auditi/decorators.py +1441 -0
- auditi/evaluator.py +38 -0
- auditi/events.py +194 -0
- auditi/providers/__init__.py +41 -0
- auditi/providers/anthropic.py +141 -0
- auditi/providers/base.py +156 -0
- auditi/providers/google.py +182 -0
- auditi/providers/openai.py +147 -0
- auditi/providers/registry.py +166 -0
- auditi/transport.py +78 -0
- auditi/types/__init__.py +12 -0
- auditi/types/api_types.py +107 -0
- auditi-0.1.0.dist-info/METADATA +703 -0
- auditi-0.1.0.dist-info/RECORD +19 -0
- auditi-0.1.0.dist-info/WHEEL +4 -0
- auditi-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Google Gemini provider implementation for usage extraction and cost calculation.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Optional, Any, Dict, Tuple
|
|
6
|
+
from .base import BaseProvider
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _coerce_int(value: Any) -> Optional[int]:
|
|
10
|
+
"""Helper to safely convert values to int."""
|
|
11
|
+
if value is None:
|
|
12
|
+
return None
|
|
13
|
+
try:
|
|
14
|
+
return int(value)
|
|
15
|
+
except (TypeError, ValueError):
|
|
16
|
+
return None
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class GoogleProvider(BaseProvider):
    """Provider implementation for Google Gemini models."""

    @property
    def name(self) -> str:
        return "google"

    @property
    def model_pricing(self) -> Dict[str, Tuple[float, float]]:
        """
        Google Gemini model pricing per 1M tokens (input, output) in USD.
        Updated as of January 2025.

        Note: Pricing may vary by context window size and features.
        """
        return {
            # Gemini 2.0 family
            "gemini-2.0-flash-exp": (0.00, 0.00),  # Free during preview
            "gemini-2.0-flash": (0.10, 0.40),
            "gemini-2.0-flash-thinking-exp": (0.00, 0.00),  # Free during preview
            # Gemini 1.5 family
            "gemini-1.5-pro": (1.25, 5.00),
            "gemini-1.5-pro-001": (1.25, 5.00),
            "gemini-1.5-pro-002": (1.25, 5.00),
            "gemini-1.5-pro-latest": (1.25, 5.00),
            "gemini-1.5-flash": (0.075, 0.30),
            "gemini-1.5-flash-001": (0.075, 0.30),
            "gemini-1.5-flash-002": (0.075, 0.30),
            "gemini-1.5-flash-latest": (0.075, 0.30),
            "gemini-1.5-flash-8b": (0.0375, 0.15),
            # Gemini 1.0 family (legacy)
            "gemini-1.0-pro": (0.50, 1.50),
            "gemini-1.0-pro-001": (0.50, 1.50),
            "gemini-1.0-pro-latest": (0.50, 1.50),
            "gemini-pro": (0.50, 1.50),
            # Model variants with features
            "gemini-1.5-pro-exp-0827": (1.25, 5.00),
            "gemini-exp-1206": (0.00, 0.00),  # Experimental, free
        }

    def get_default_pricing(self) -> Tuple[float, float]:
        """Conservative default for unknown Google models (Gemini 1.5 Pro rates)."""
        return (1.25, 5.00)

    def get_model_prefixes(self) -> list[str]:
        return ["gemini-", "gemini"]

    def extract_usage(self, usage: Any) -> Tuple[Optional[int], Optional[int], Optional[int]]:
        """
        Extract (input, output, total) token counts from a Gemini usage object.

        Supports both the newer camelCase fields (``promptTokenCount``,
        ``candidatesTokenCount``, ``totalTokenCount``) and the older
        snake_case fields (``prompt_token_count``, ...), on either a dict
        or an attribute-bearing object. camelCase is preferred; snake_case
        is consulted only when the camelCase value is absent or unusable.
        """
        if usage is None:
            return None, None, None

        # Build a reader appropriate to the container type; each reader
        # returns the first key that yields a coercible integer.
        if isinstance(usage, dict):
            def read(*keys: str) -> Optional[int]:
                for key in keys:
                    found = _coerce_int(usage.get(key))
                    if found is not None:
                        return found
                return None
        else:
            def read(*keys: str) -> Optional[int]:
                for key in keys:
                    found = _coerce_int(getattr(usage, key, None))
                    if found is not None:
                        return found
                return None

        prompt = read("promptTokenCount", "prompt_token_count")
        completion = read("candidatesTokenCount", "candidates_token_count")
        combined = read("totalTokenCount", "total_token_count")

        # Derive a total when the API omitted it but gave us parts.
        if combined is None and (prompt is not None or completion is not None):
            combined = (prompt or 0) + (completion or 0)

        return prompt, completion, combined

    def extract_model(self, response: Any) -> Optional[str]:
        """Extract the model name from a Google response dict or object."""
        if response is None:
            return None

        if isinstance(response, dict):
            # Gemini responses may carry either "model" or "modelVersion".
            candidate = response.get("model") or response.get("modelVersion")
            if candidate:
                return candidate

        # Object-style responses: check attributes in the same priority order.
        if hasattr(response, "model"):
            return str(response.model)
        if hasattr(response, "model_version"):
            return str(response.model_version)

        return None

    def matches_response(self, response: Any) -> bool:
        """
        Detect Google Gemini responses by structure.

        Gemini responses typically have:
        - 'candidates' array (not 'choices')
        - 'usageMetadata' or 'usage_metadata' (not 'usage')
        - 'promptTokenCount' style fields
        """
        if response is None:
            return False

        if isinstance(response, dict):
            # Strongest signal: a usage-metadata dict carrying Gemini's
            # prompt-token field in either naming convention.
            meta = response.get("usageMetadata") or response.get("usage_metadata")
            if isinstance(meta, dict) and (
                "promptTokenCount" in meta or "prompt_token_count" in meta
            ):
                return True
            # Weaker structural signals.
            if (
                "candidates" in response
                or "usageMetadata" in response
                or "usage_metadata" in response
            ):
                return True
        elif hasattr(response, "candidates"):
            return True
        elif hasattr(response, "usage_metadata") or hasattr(response, "usageMetadata"):
            return True

        # Fallback to model prefix matching
        return super().matches_response(response)
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OpenAI provider implementation for usage extraction and cost calculation.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Optional, Any, Dict, Tuple
|
|
6
|
+
from .base import BaseProvider
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _coerce_int(value: Any) -> Optional[int]:
|
|
10
|
+
"""Helper to safely convert values to int."""
|
|
11
|
+
if value is None:
|
|
12
|
+
return None
|
|
13
|
+
try:
|
|
14
|
+
return int(value)
|
|
15
|
+
except (TypeError, ValueError):
|
|
16
|
+
return None
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class OpenAIProvider(BaseProvider):
    """Provider implementation for OpenAI models."""

    @property
    def name(self) -> str:
        return "openai"

    @property
    def model_pricing(self) -> Dict[str, Tuple[float, float]]:
        """
        OpenAI model pricing per 1M tokens (input, output) in USD.
        Updated as of January 2025.
        """
        return {
            # GPT-4o family
            "gpt-4o": (2.50, 10.00),
            "gpt-4o-2024-11-20": (2.50, 10.00),
            "gpt-4o-2024-08-06": (2.50, 10.00),
            "gpt-4o-2024-05-13": (5.00, 15.00),
            "gpt-4o-mini": (0.15, 0.60),
            "gpt-4o-mini-2024-07-18": (0.15, 0.60),
            # GPT-4 Turbo
            "gpt-4-turbo": (10.00, 30.00),
            "gpt-4-turbo-2024-04-09": (10.00, 30.00),
            "gpt-4-turbo-preview": (10.00, 30.00),
            "gpt-4-0125-preview": (10.00, 30.00),
            "gpt-4-1106-preview": (10.00, 30.00),
            # GPT-4 base
            "gpt-4": (30.00, 60.00),
            "gpt-4-0613": (30.00, 60.00),
            "gpt-4-32k": (60.00, 120.00),
            "gpt-4-32k-0613": (60.00, 120.00),
            # GPT-3.5 Turbo
            "gpt-3.5-turbo": (0.50, 1.50),
            "gpt-3.5-turbo-0125": (0.50, 1.50),
            "gpt-3.5-turbo-1106": (1.00, 2.00),
            "gpt-3.5-turbo-16k": (3.00, 4.00),
            # o1 models
            "o1-preview": (15.00, 60.00),
            "o1-preview-2024-09-12": (15.00, 60.00),
            "o1-mini": (3.00, 12.00),
            "o1-mini-2024-09-12": (3.00, 12.00),
            "o1": (15.00, 60.00),
        }

    def get_default_pricing(self) -> Tuple[float, float]:
        """Conservative default for unknown OpenAI models (GPT-4 Turbo rates)."""
        return (10.00, 30.00)

    def get_model_prefixes(self) -> list[str]:
        return ["gpt-", "o1-", "o1"]

    def extract_usage(self, usage: Any) -> Tuple[Optional[int], Optional[int], Optional[int]]:
        """
        Extract (input, output, total) token counts from an OpenAI usage object.

        Reads the standard ``prompt_tokens`` / ``completion_tokens`` /
        ``total_tokens`` fields from either a dict or an attribute-bearing
        object; a missing total is derived from the parts when possible.
        """
        if usage is None:
            return None, None, None

        # Unify dict and object access behind one lookup function.
        if isinstance(usage, dict):
            def lookup(field: str) -> Any:
                return usage.get(field)
        else:
            def lookup(field: str) -> Any:
                return getattr(usage, field, None)

        prompt = _coerce_int(lookup("prompt_tokens"))
        completion = _coerce_int(lookup("completion_tokens"))
        combined = _coerce_int(lookup("total_tokens"))

        # Derive a total when the API omitted it but gave us parts.
        if combined is None and (prompt is not None or completion is not None):
            combined = (prompt or 0) + (completion or 0)

        return prompt, completion, combined

    def extract_model(self, response: Any) -> Optional[str]:
        """Extract the model name from an OpenAI response dict or object."""
        if response is None:
            return None

        if isinstance(response, dict):
            return response.get("model")

        if hasattr(response, "model"):
            return str(response.model)

        return None

    def matches_response(self, response: Any) -> bool:
        """
        Detect OpenAI responses by structure.

        OpenAI responses typically have:
        - 'choices' array with 'message' or 'text'
        - 'usage' with 'prompt_tokens' and 'completion_tokens'
        """
        if response is None:
            return False

        if isinstance(response, dict):
            if "choices" in response:
                return True
            # A usage dict carrying "prompt_tokens" is also a strong signal.
            usage = response.get("usage")
            if isinstance(usage, dict) and "prompt_tokens" in usage:
                return True
        elif hasattr(response, "choices") and hasattr(response, "usage"):
            return True

        # Fallback to model prefix matching
        return super().matches_response(response)
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Provider registry for auto-detecting and managing LLM providers.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Optional, Any, List
|
|
6
|
+
from .base import BaseProvider
|
|
7
|
+
from .openai import OpenAIProvider
|
|
8
|
+
from .anthropic import AnthropicProvider
|
|
9
|
+
from .google import GoogleProvider
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ProviderRegistry:
    """
    Central registry for LLM providers.

    Handles:
    - Provider registration
    - Auto-detection from model names
    - Auto-detection from response structures
    - Fallback to a default provider (OpenAI format, the most common)
    """

    def __init__(self):
        self._providers: List[BaseProvider] = []
        self._default_provider: Optional[BaseProvider] = None

        # Register built-in providers
        self.register(OpenAIProvider())
        self.register(AnthropicProvider())
        self.register(GoogleProvider())

        # Set OpenAI as default fallback (most common format)
        self._default_provider = self.get_provider("openai")

    def register(self, provider: BaseProvider) -> None:
        """
        Register a new provider.

        Note: lookups return the FIRST provider with a matching name, so a
        later registration does not shadow an earlier one of the same name.
        """
        self._providers.append(provider)

    def get_provider(self, name: str) -> Optional[BaseProvider]:
        """Return the first registered provider with this name, or None."""
        for provider in self._providers:
            if provider.name == name:
                return provider
        return None

    def detect_from_model(self, model: Optional[str]) -> Optional[BaseProvider]:
        """
        Detect provider from model name.

        Args:
            model: Model name string (e.g., "gpt-4", "claude-3-opus-20240229")

        Returns:
            Matching provider, or None if no match

        Example:
            >>> registry.detect_from_model("gpt-4o")
            OpenAIProvider()
            >>> registry.detect_from_model("claude-3-5-sonnet-20241022")
            AnthropicProvider()
        """
        if not model:
            return None

        for provider in self._providers:
            if provider.matches_model(model):
                return provider

        return None

    def detect_from_response(self, response: Any) -> Optional[BaseProvider]:
        """
        Detect provider from response structure.

        This is useful when the model name is not known but we have
        the API response object.

        Args:
            response: Raw API response object or dict

        Returns:
            Matching provider, or None if no match

        Example:
            >>> response = {"choices": [...], "usage": {"prompt_tokens": 10}}
            >>> registry.detect_from_response(response)
            OpenAIProvider()
        """
        if response is None:
            return None

        # Try to extract model first (fastest method)
        for provider in self._providers:
            model = provider.extract_model(response)
            if model and provider.matches_model(model):
                return provider

        # Fall back to structure detection
        for provider in self._providers:
            if provider.matches_response(response):
                return provider

        return None

    def get_provider_or_default(
        self, model: Optional[str] = None, response: Any = None
    ) -> BaseProvider:
        """
        Get provider with fallback to default.

        Tries in order:
        1. Detect from model name
        2. Detect from response structure
        3. Use default provider

        Args:
            model: Optional model name
            response: Optional API response

        Returns:
            Provider (never None, returns default if detection fails)
        """
        # Try model detection first
        if model:
            provider = self.detect_from_model(model)
            if provider:
                return provider

        # Try response detection.
        # Fix: use an explicit None check (matching detect_from_response)
        # so falsy-but-present responses (e.g. an empty dict) are not
        # silently skipped by truthiness.
        if response is not None:
            provider = self.detect_from_response(response)
            if provider:
                return provider

        # Fallback to default
        return self._default_provider or self._providers[0]
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
# Process-wide singleton shared by all auditi instrumentation.
_registry = ProviderRegistry()


def get_registry() -> ProviderRegistry:
    """Return the global provider registry singleton."""
    return _registry
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def detect_provider(model: Optional[str] = None, response: Any = None) -> BaseProvider:
    """
    Resolve the provider for a model name and/or raw API response.

    This is the main entry point for provider detection. It never returns
    None: when neither the model name nor the response structure matches a
    registered provider, the registry's default provider is returned.

    Args:
        model: Optional model name
        response: Optional API response

    Returns:
        Detected provider (falls back to default if detection fails)

    Example:
        >>> provider = detect_provider(model="gpt-4o")
        >>> input_tokens, output_tokens, total = provider.extract_usage(usage)
        >>> cost = provider.calculate_cost(model, input_tokens, output_tokens)
    """
    return _registry.get_provider_or_default(model=model, response=response)
|
auditi/transport.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Transport layer for sending traces to the Auditi platform.
|
|
3
|
+
|
|
4
|
+
Provides different transport implementations for various use cases:
|
|
5
|
+
- SyncHttpTransport: Synchronous HTTP transport (default)
|
|
6
|
+
- DebugTransport: Debug transport that prints to console
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import abc
import logging
from typing import Any, Dict, Optional

import httpx
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger("auditi")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class BaseTransport(abc.ABC):
    """
    Abstract interface for delivering traces to the Auditi platform.

    Subclass and implement ``send_trace`` to plug in a custom delivery
    mechanism.
    """

    @abc.abstractmethod
    def send_trace(self, trace_data: Dict[str, Any]) -> None:
        """
        Deliver a single trace to the Auditi platform.

        Args:
            trace_data: Serialized trace data as a dictionary
        """
        pass
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class SyncHttpTransport(BaseTransport):
    """
    Synchronous HTTP transport using httpx.
    Sends traces immediately via HTTP POST; delivery failures are logged
    and never raised to the instrumented application.
    """

    def __init__(self, base_url: str, api_key: Optional[str] = None):
        """
        Initialize the transport.

        Args:
            base_url: Base URL of the Auditi API (trailing slash is stripped)
            api_key: API key for authentication (optional); when provided it
                is sent as a Bearer token
        """
        self.base_url = base_url.rstrip("/")
        self.api_key = api_key
        self.headers = {
            "Content-Type": "application/json",
        }
        if api_key:
            self.headers["Authorization"] = f"Bearer {api_key}"

    def send_trace(self, trace_data: Dict[str, Any]) -> None:
        """Send trace data via HTTP POST; logs (does not raise) on failure."""
        url = f"{self.base_url}/api/v1/ingest"
        try:
            # A fresh client per call keeps the transport stateless; the
            # context manager guarantees the connection is closed.
            with httpx.Client() as client:
                response = client.post(url, json=trace_data, headers=self.headers, timeout=5.0)
                response.raise_for_status()
                # Lazy %-formatting: only evaluated when debug logging is on.
                logger.debug("Trace sent successfully: %s", trace_data.get("id"))
        except Exception as e:
            # Broad catch is deliberate: telemetry must never crash the host app.
            logger.error("Failed to send trace to Auditi: %s", e)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class DebugTransport(BaseTransport):
    """
    Console-printing transport for local development and testing.
    Emits a one-line summary per trace instead of sending it anywhere.
    """

    def send_trace(self, trace_data: Dict[str, Any]) -> None:
        """Print a short summary (id and span count) of the trace to stdout."""
        trace_id = trace_data.get("id", "unknown")
        spans = trace_data.get("spans", [])
        print(f"[Auditi] Trace captured: {trace_id} ({len(spans)} spans)")
|
auditi/types/__init__.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pydantic models for Auditi SDK API types.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, Field, field_validator
|
|
6
|
+
from typing import List, Optional, Dict, Any
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from uuid import UUID
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class SpanInput(BaseModel):
    """
    Represents a single operation (tool call, LLM call) within a trace.

    Spans are nested under a TraceInput via ``trace_id`` and may form a
    tree via ``parent_id``.
    """

    # Identity — assumes ids are generated by the caller; this model does not create them.
    id: UUID
    trace_id: UUID
    parent_id: Optional[UUID] = None
    name: str
    span_type: str  # "tool", "llm", "retrieval", etc.

    # Timing — end_time is None while the span is still open.
    start_time: datetime
    end_time: Optional[datetime] = None
    processing_time: Optional[float] = None  # Duration in seconds

    # Input/Output
    inputs: Optional[Dict[str, Any]] = None
    outputs: Optional[str] = None

    # LLM specific — only populated for span_type "llm".
    model: Optional[str] = None
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    tokens: Optional[int] = None  # presumably input + output total — confirm against producer
    cost: Optional[float] = None  # USD, per the providers' pricing tables

    # Status
    status: Optional[str] = None  # "ok", "error"
    error: Optional[str] = None

    # Metadata — free-form; defaults to an empty dict, never None, via the factory.
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class TraceInput(BaseModel):
    """
    Represents a complete agent interaction with a user.

    A trace aggregates the user's input, the assistant's output, overall
    metrics, an optional evaluation verdict, and the list of child spans.
    """

    # Identity
    id: UUID
    user_id: Optional[str] = None
    conversation_id: Optional[str] = None

    # Timing — end_time is None while the interaction is still in flight.
    start_time: datetime
    end_time: Optional[datetime] = None

    # Content
    name: str
    user_input: str = ""  # normalized to "" by the validator below, never None
    assistant_output: Optional[str] = None

    # Metrics
    total_tokens: Optional[int] = None
    cost: Optional[float] = None  # USD, per the providers' pricing tables

    # Evaluation
    status: Optional[str] = None  # "pass", "fail", "review", "pending"
    score: Optional[float] = None
    failure_mode: Optional[str] = None
    eval_reason: Optional[str] = None

    # Relations
    spans: List[SpanInput] = Field(default_factory=list)

    # Metadata
    tags: List[str] = Field(default_factory=list)
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    error: Optional[str] = None

    @field_validator("user_input", mode="before")
    def normalize_user_input(cls, v):
        # Coerce None (and any falsy input) to "" so the field is always a str.
        return v or ""
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class EvaluationResult(BaseModel):
    """
    Result of evaluating a trace.

    Contains pass/fail status, a numeric score, and optional failure
    information describing why the trace failed and what to do about it.
    """

    status: str  # "pass" or "fail"
    score: float  # range not enforced here — presumably 0.0–1.0; confirm with the evaluator
    reason: Optional[str] = None  # human-readable explanation of the verdict
    failure_mode: Optional[str] = None  # category of failure, when status is "fail"
    recommended_action: Optional[str] = None
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
class TraceResponse(BaseModel):
    """
    Response from the Auditi API after ingesting a trace.
    """

    # True when the ingest request was accepted.
    success: bool
    # Number of items accepted — presumably traces; confirm against the ingest API.
    count: int
|