botanu 0.1.dev60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botanu/__init__.py +76 -0
- botanu/_version.py +13 -0
- botanu/integrations/__init__.py +4 -0
- botanu/integrations/tenacity.py +60 -0
- botanu/models/__init__.py +10 -0
- botanu/models/run_context.py +328 -0
- botanu/processors/__init__.py +12 -0
- botanu/processors/enricher.py +84 -0
- botanu/py.typed +0 -0
- botanu/resources/__init__.py +87 -0
- botanu/sdk/__init__.py +37 -0
- botanu/sdk/bootstrap.py +405 -0
- botanu/sdk/config.py +330 -0
- botanu/sdk/context.py +73 -0
- botanu/sdk/decorators.py +407 -0
- botanu/sdk/middleware.py +97 -0
- botanu/sdk/span_helpers.py +143 -0
- botanu/tracking/__init__.py +55 -0
- botanu/tracking/data.py +488 -0
- botanu/tracking/llm.py +700 -0
- botanu-0.1.dev60.dist-info/METADATA +208 -0
- botanu-0.1.dev60.dist-info/RECORD +25 -0
- botanu-0.1.dev60.dist-info/WHEEL +4 -0
- botanu-0.1.dev60.dist-info/licenses/LICENSE +200 -0
- botanu-0.1.dev60.dist-info/licenses/NOTICE +17 -0
botanu/tracking/llm.py
ADDED
|
@@ -0,0 +1,700 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: 2026 The Botanu Authors
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
"""LLM/Model Tracking — Track AI model usage for cost attribution.
|
|
5
|
+
|
|
6
|
+
Aligned with OpenTelemetry GenAI Semantic Conventions:
|
|
7
|
+
https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/
|
|
8
|
+
|
|
9
|
+
Usage::
|
|
10
|
+
|
|
11
|
+
from botanu.tracking.llm import track_llm_call, track_tool_call
|
|
12
|
+
|
|
13
|
+
with track_llm_call(vendor="openai", model="gpt-4") as tracker:
|
|
14
|
+
response = openai.chat.completions.create(...)
|
|
15
|
+
tracker.set_tokens(
|
|
16
|
+
input_tokens=response.usage.prompt_tokens,
|
|
17
|
+
output_tokens=response.usage.completion_tokens,
|
|
18
|
+
)
|
|
19
|
+
tracker.set_request_id(response.id)
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
from __future__ import annotations
|
|
23
|
+
|
|
24
|
+
import contextvars
|
|
25
|
+
import functools
|
|
26
|
+
from contextlib import contextmanager
|
|
27
|
+
from dataclasses import dataclass, field
|
|
28
|
+
from datetime import datetime, timezone
|
|
29
|
+
from typing import Any, Dict, Generator, List, Optional
|
|
30
|
+
|
|
31
|
+
from opentelemetry import metrics, trace
|
|
32
|
+
from opentelemetry.trace import Span, SpanKind, Status, StatusCode
|
|
33
|
+
|
|
34
|
+
# Context variable for automatic retry detection (set by the tenacity
# integration in botanu.integrations.tenacity).
# Default 0 means "not set by retry callback"; 1+ means the attempt number.
# track_llm_call() reads this to stamp botanu.request.attempt automatically.
_retry_attempt: contextvars.ContextVar[int] = contextvars.ContextVar(
    "botanu_retry_attempt", default=0
)
|
|
39
|
+
|
|
40
|
+
# =========================================================================
|
|
41
|
+
# OTel GenAI Semantic Convention Attribute Names
|
|
42
|
+
# =========================================================================
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class GenAIAttributes:
    """OpenTelemetry GenAI Semantic Convention attribute names.

    String keys used when setting span attributes in this module; values
    follow the OTel GenAI spec referenced in the module docstring.
    """

    # Identity of the call: operation kind, provider, and model names.
    OPERATION_NAME = "gen_ai.operation.name"
    PROVIDER_NAME = "gen_ai.provider.name"
    REQUEST_MODEL = "gen_ai.request.model"
    RESPONSE_MODEL = "gen_ai.response.model"

    # Token usage reported by the provider.
    USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
    USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"

    # Request sampling / decoding parameters.
    REQUEST_TEMPERATURE = "gen_ai.request.temperature"
    REQUEST_TOP_P = "gen_ai.request.top_p"
    REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
    REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences"
    REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
    REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"

    # Response metadata.
    RESPONSE_ID = "gen_ai.response.id"
    RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons"

    # Tool / function-call execution.
    TOOL_NAME = "gen_ai.tool.name"
    TOOL_CALL_ID = "gen_ai.tool.call.id"

    # Standard OTel error attribute (set to the exception class name here).
    ERROR_TYPE = "error.type"
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class BotanuAttributes:
    """Botanu-specific attributes for cost attribution.

    These extend the OTel GenAI conventions with keys this module uses for
    billing reconciliation, prompt-cache accounting, and tool-call results.
    """

    # Vendor-assigned and client-assigned request IDs (billing reconciliation).
    VENDOR_REQUEST_ID = "botanu.vendor.request_id"
    VENDOR_CLIENT_REQUEST_ID = "botanu.vendor.client_request_id"

    # Prompt-cache token accounting.
    TOKENS_CACHED = "botanu.usage.cached_tokens"
    TOKENS_CACHED_READ = "botanu.usage.cache_read_tokens"
    TOKENS_CACHED_WRITE = "botanu.usage.cache_write_tokens"

    # Request shape flags.
    STREAMING = "botanu.request.streaming"
    CACHE_HIT = "botanu.request.cache_hit"
    ATTEMPT_NUMBER = "botanu.request.attempt"

    # Tool execution outcome.
    TOOL_SUCCESS = "botanu.tool.success"
    TOOL_ITEMS_RETURNED = "botanu.tool.items_returned"
    TOOL_BYTES_PROCESSED = "botanu.tool.bytes_processed"
    TOOL_DURATION_MS = "botanu.tool.duration_ms"

    # Normalized vendor name; set alongside gen_ai.provider.name in this module.
    VENDOR = "botanu.vendor"
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
# =========================================================================
|
|
86
|
+
# Vendor name normalization
|
|
87
|
+
# =========================================================================
|
|
88
|
+
|
|
89
|
+
# Maps common vendor spellings/aliases (lower-cased) to the canonical
# provider name recorded in gen_ai.provider.name. Unknown vendors fall
# through unchanged — callers use LLM_VENDORS.get(v, v).
LLM_VENDORS: Dict[str, str] = {
    "openai": "openai",
    # Azure OpenAI spellings.
    "azure_openai": "azure.openai",
    "azure-openai": "azure.openai",
    "azureopenai": "azure.openai",
    # Anthropic / Claude.
    "anthropic": "anthropic",
    "claude": "anthropic",
    # AWS Bedrock spellings.
    "bedrock": "aws.bedrock",
    "aws_bedrock": "aws.bedrock",
    "amazon_bedrock": "aws.bedrock",
    # Google Vertex AI / Gemini spellings.
    "vertex": "gcp.vertex_ai",
    "vertexai": "gcp.vertex_ai",
    "vertex_ai": "gcp.vertex_ai",
    "gcp_vertex": "gcp.vertex_ai",
    "gemini": "gcp.vertex_ai",
    "google": "gcp.vertex_ai",
    "cohere": "cohere",
    # Mistral spellings.
    "mistral": "mistral",
    "mistralai": "mistral",
    # Together AI spellings.
    "together": "together",
    "togetherai": "together",
    "groq": "groq",
    "replicate": "replicate",
    "ollama": "ollama",
    # Hugging Face spellings.
    "huggingface": "huggingface",
    "hf": "huggingface",
    "fireworks": "fireworks",
    "perplexity": "perplexity",
}
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class ModelOperation:
    """GenAI operation types per OTel semconv.

    Values are used for gen_ai.operation.name and as the first token of
    span names produced by :func:`track_llm_call`.
    """

    CHAT = "chat"
    TEXT_COMPLETION = "text_completion"
    EMBEDDINGS = "embeddings"
    GENERATE_CONTENT = "generate_content"
    EXECUTE_TOOL = "execute_tool"
    CREATE_AGENT = "create_agent"
    INVOKE_AGENT = "invoke_agent"
    RERANK = "rerank"
    IMAGE_GENERATION = "image_generation"
    IMAGE_EDIT = "image_edit"
    SPEECH_TO_TEXT = "speech_to_text"
    TEXT_TO_SPEECH = "text_to_speech"
    MODERATION = "moderation"

    # Aliases for caller convenience; same canonical values as above.
    COMPLETION = "text_completion"
    EMBEDDING = "embeddings"
    FUNCTION_CALL = "execute_tool"
    TOOL_USE = "execute_tool"
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
# =========================================================================
|
|
145
|
+
# GenAI Metrics
|
|
146
|
+
# =========================================================================
|
|
147
|
+
|
|
148
|
+
# Module-level meter and instruments, created once at import time.
_meter = metrics.get_meter("botanu.gen_ai")

# Token counts per call; attributes distinguish input vs output via
# gen_ai.token.type (see _record_token_metrics). Name per OTel GenAI semconv.
_token_usage_histogram = _meter.create_histogram(
    name="gen_ai.client.token.usage",
    description="Number of input and output tokens used",
    unit="{token}",
)

# Wall-clock duration of each GenAI operation, in seconds.
_operation_duration_histogram = _meter.create_histogram(
    name="gen_ai.client.operation.duration",
    description="GenAI operation duration",
    unit="s",
)

# One increment per attempt (retries included); labeled success/error.
_attempt_counter = _meter.create_counter(
    name="botanu.gen_ai.attempts",
    description="Number of request attempts (including retries)",
    unit="{attempt}",
)
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def _record_token_metrics(
    vendor: str,
    model: str,
    operation: str,
    input_tokens: int,
    output_tokens: int,
    error_type: Optional[str] = None,
) -> None:
    """Record non-zero input/output token counts on the token-usage histogram.

    Each direction is emitted as a separate histogram sample, distinguished
    by the ``gen_ai.token.type`` attribute; zero counts are skipped.
    """
    shared_labels: Dict[str, str] = {
        GenAIAttributes.OPERATION_NAME: operation,
        GenAIAttributes.PROVIDER_NAME: vendor,
        GenAIAttributes.REQUEST_MODEL: model,
    }
    if error_type:
        shared_labels[GenAIAttributes.ERROR_TYPE] = error_type

    for token_type, count in (("input", input_tokens), ("output", output_tokens)):
        if count > 0:
            _token_usage_histogram.record(
                count,
                {**shared_labels, "gen_ai.token.type": token_type},
            )
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def _record_duration_metric(
    vendor: str,
    model: str,
    operation: str,
    duration_seconds: float,
    error_type: Optional[str] = None,
) -> None:
    """Record one GenAI operation duration sample (seconds) on the histogram."""
    labels: Dict[str, str] = {
        GenAIAttributes.OPERATION_NAME: operation,
        GenAIAttributes.PROVIDER_NAME: vendor,
        GenAIAttributes.REQUEST_MODEL: model,
    }
    if error_type:
        labels[GenAIAttributes.ERROR_TYPE] = error_type

    _operation_duration_histogram.record(duration_seconds, labels)
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
# =========================================================================
|
|
216
|
+
# LLM Tracker
|
|
217
|
+
# =========================================================================
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
@dataclass
class LLMTracker:
    """Mutable record of a single LLM call, bound to an optional OTel span.

    Instances are normally created by :func:`track_llm_call`. Each setter
    stores the value, mirrors it onto the span (when one is attached) using
    the OTel GenAI semantic conventions, and returns ``self`` for chaining.
    """

    vendor: str
    model: str
    operation: str = ModelOperation.CHAT
    span: Optional[Span] = field(default=None, repr=False)
    start_time: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    # Token usage reported by the provider.
    input_tokens: int = 0
    output_tokens: int = 0
    cached_tokens: int = 0
    cache_read_tokens: int = 0
    cache_write_tokens: int = 0

    # Request/response metadata.
    vendor_request_id: Optional[str] = None
    client_request_id: Optional[str] = None
    response_model: Optional[str] = None
    finish_reason: Optional[str] = None
    is_streaming: bool = False
    cache_hit: bool = False
    attempt_number: int = 1
    error_type: Optional[str] = None

    def set_tokens(
        self,
        input_tokens: int = 0,
        output_tokens: int = 0,
        cached_tokens: int = 0,
        cache_read_tokens: int = 0,
        cache_write_tokens: int = 0,
    ) -> LLMTracker:
        """Set token counts from model response."""
        self.input_tokens = input_tokens
        self.output_tokens = output_tokens
        # Fall back to the cache-read count when no explicit cached count given.
        self.cached_tokens = cached_tokens or cache_read_tokens
        self.cache_read_tokens = cache_read_tokens
        self.cache_write_tokens = cache_write_tokens

        if self.span is None:
            return self
        self.span.set_attribute(GenAIAttributes.USAGE_INPUT_TOKENS, input_tokens)
        self.span.set_attribute(GenAIAttributes.USAGE_OUTPUT_TOKENS, output_tokens)
        if self.cached_tokens > 0:
            self.span.set_attribute(BotanuAttributes.TOKENS_CACHED, self.cached_tokens)
        if cache_read_tokens > 0:
            self.span.set_attribute(BotanuAttributes.TOKENS_CACHED_READ, cache_read_tokens)
        if cache_write_tokens > 0:
            self.span.set_attribute(BotanuAttributes.TOKENS_CACHED_WRITE, cache_write_tokens)
        return self

    def set_request_id(
        self,
        vendor_request_id: Optional[str] = None,
        client_request_id: Optional[str] = None,
    ) -> LLMTracker:
        """Set vendor request IDs for billing reconciliation."""
        if vendor_request_id:
            self.vendor_request_id = vendor_request_id
        if client_request_id:
            self.client_request_id = client_request_id

        if self.span is None:
            return self
        if vendor_request_id:
            self.span.set_attribute(GenAIAttributes.RESPONSE_ID, vendor_request_id)
            self.span.set_attribute(BotanuAttributes.VENDOR_REQUEST_ID, vendor_request_id)
        if client_request_id:
            self.span.set_attribute(
                BotanuAttributes.VENDOR_CLIENT_REQUEST_ID, client_request_id
            )
        return self

    def set_response_model(self, model: str) -> LLMTracker:
        """Set the actual model used in the response."""
        self.response_model = model
        if self.span is None:
            return self
        self.span.set_attribute(GenAIAttributes.RESPONSE_MODEL, model)
        return self

    def set_finish_reason(self, reason: str) -> LLMTracker:
        """Set the finish/stop reason from the response."""
        self.finish_reason = reason
        if self.span is None:
            return self
        # Semconv defines finish reasons as an array attribute.
        self.span.set_attribute(GenAIAttributes.RESPONSE_FINISH_REASONS, [reason])
        return self

    def set_streaming(self, is_streaming: bool = True) -> LLMTracker:
        """Mark request as streaming."""
        self.is_streaming = is_streaming
        if self.span is None:
            return self
        self.span.set_attribute(BotanuAttributes.STREAMING, is_streaming)
        return self

    def set_cache_hit(self, cache_hit: bool = True) -> LLMTracker:
        """Mark as cache hit."""
        self.cache_hit = cache_hit
        if self.span is None:
            return self
        self.span.set_attribute(BotanuAttributes.CACHE_HIT, cache_hit)
        return self

    def set_attempt(self, attempt_number: int) -> LLMTracker:
        """Set the attempt number (for retry tracking)."""
        self.attempt_number = attempt_number
        if self.span is None:
            return self
        self.span.set_attribute(BotanuAttributes.ATTEMPT_NUMBER, attempt_number)
        return self

    def set_request_params(
        self,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_tokens: Optional[int] = None,
        stop_sequences: Optional[List[str]] = None,
        frequency_penalty: Optional[float] = None,
        presence_penalty: Optional[float] = None,
    ) -> LLMTracker:
        """Set request parameters per OTel GenAI semconv (only those provided)."""
        if self.span is None:
            return self
        param_pairs = (
            (GenAIAttributes.REQUEST_TEMPERATURE, temperature),
            (GenAIAttributes.REQUEST_TOP_P, top_p),
            (GenAIAttributes.REQUEST_MAX_TOKENS, max_tokens),
            (GenAIAttributes.REQUEST_STOP_SEQUENCES, stop_sequences),
            (GenAIAttributes.REQUEST_FREQUENCY_PENALTY, frequency_penalty),
            (GenAIAttributes.REQUEST_PRESENCE_PENALTY, presence_penalty),
        )
        for attr_name, attr_value in param_pairs:
            if attr_value is not None:
                self.span.set_attribute(attr_name, attr_value)
        return self

    def set_error(self, error: Exception) -> LLMTracker:
        """Record an error from the LLM call."""
        self.error_type = type(error).__name__
        if self.span is None:
            return self
        self.span.set_status(Status(StatusCode.ERROR, str(error)))
        self.span.set_attribute(GenAIAttributes.ERROR_TYPE, self.error_type)
        self.span.record_exception(error)
        return self

    def add_metadata(self, **kwargs: Any) -> LLMTracker:
        """Add custom metadata to the span (keys auto-prefixed with ``botanu.``)."""
        if self.span is None:
            return self
        for key, value in kwargs.items():
            if key.startswith(("botanu.", "gen_ai.")):
                self.span.set_attribute(key, value)
            else:
                self.span.set_attribute(f"botanu.{key}", value)
        return self

    def _finalize(self) -> None:
        """Flush token/duration/attempt metrics; runs once when the span closes."""
        if self.span is None:
            return

        elapsed = (datetime.now(timezone.utc) - self.start_time).total_seconds()

        _record_token_metrics(
            vendor=self.vendor,
            model=self.model,
            operation=self.operation,
            input_tokens=self.input_tokens,
            output_tokens=self.output_tokens,
            error_type=self.error_type,
        )
        _record_duration_metric(
            vendor=self.vendor,
            model=self.model,
            operation=self.operation,
            duration_seconds=elapsed,
            error_type=self.error_type,
        )
        _attempt_counter.add(
            1,
            {
                GenAIAttributes.PROVIDER_NAME: self.vendor,
                GenAIAttributes.REQUEST_MODEL: self.model,
                GenAIAttributes.OPERATION_NAME: self.operation,
                "status": "error" if self.error_type else "success",
            },
        )
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
@contextmanager
def track_llm_call(
    vendor: str,
    model: str,
    operation: str = ModelOperation.CHAT,
    client_request_id: Optional[str] = None,
    **kwargs: Any,
) -> Generator[LLMTracker, None, None]:
    """Context manager tracking one LLM/model call on a CLIENT span.

    Args:
        vendor: LLM vendor (openai, anthropic, bedrock, vertex, …).
        model: Model name/ID (gpt-4, claude-3-opus, …).
        operation: Type of operation (chat, embeddings, text_completion, …).
        client_request_id: Optional client-generated request ID.
        **kwargs: Additional span attributes (keys auto-prefixed ``botanu.``).

    Yields:
        :class:`LLMTracker` instance.
    """
    provider = LLM_VENDORS.get(vendor.lower(), vendor.lower())
    tracer = trace.get_tracer("botanu.gen_ai")

    with tracer.start_as_current_span(
        name=f"{operation} {model}", kind=SpanKind.CLIENT
    ) as span:
        span.set_attribute(GenAIAttributes.OPERATION_NAME, operation)
        span.set_attribute(GenAIAttributes.PROVIDER_NAME, provider)
        span.set_attribute(GenAIAttributes.REQUEST_MODEL, model)
        span.set_attribute(BotanuAttributes.VENDOR, provider)

        for key, value in kwargs.items():
            if key.startswith(("botanu.", "gen_ai.")):
                span.set_attribute(key, value)
            else:
                span.set_attribute(f"botanu.{key}", value)

        tracker = LLMTracker(
            vendor=provider,
            model=model,
            operation=operation,
            span=span,
        )
        if client_request_id:
            tracker.set_request_id(client_request_id=client_request_id)

        # Pick up the attempt number injected by the tenacity integration, if any.
        current_attempt = _retry_attempt.get()
        if current_attempt > 0:
            tracker.set_attempt(current_attempt)

        try:
            yield tracker
        except Exception as exc:
            tracker.set_error(exc)
            raise
        finally:
            tracker._finalize()
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
# =========================================================================
|
|
455
|
+
# Tool/Function Call Tracker
|
|
456
|
+
# =========================================================================
|
|
457
|
+
|
|
458
|
+
# Wall-clock duration of each tool execution, in seconds.
_tool_duration_histogram = _meter.create_histogram(
    name="botanu.tool.duration",
    description="Tool execution duration",
    unit="s",
)

# One increment per tool execution; labeled success/error (see ToolTracker._finalize).
_tool_counter = _meter.create_counter(
    name="botanu.tool.executions",
    description="Number of tool executions",
    unit="{execution}",
)
|
|
469
|
+
|
|
470
|
+
|
|
471
|
+
@dataclass
class ToolTracker:
    """Mutable record of one tool/function execution, bound to an optional span.

    Normally created by :func:`track_tool_call`; setters mirror values onto
    the span (when attached) and return ``self`` for chaining.
    """

    tool_name: str
    tool_call_id: Optional[str] = None
    vendor: Optional[str] = None
    span: Optional[Span] = field(default=None, repr=False)
    start_time: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    # Execution outcome.
    success: bool = True
    items_returned: int = 0
    bytes_processed: int = 0
    error_type: Optional[str] = None

    def set_result(
        self,
        success: bool = True,
        items_returned: int = 0,
        bytes_processed: int = 0,
    ) -> ToolTracker:
        """Set tool execution result."""
        self.success = success
        self.items_returned = items_returned
        self.bytes_processed = bytes_processed

        if self.span is None:
            return self
        self.span.set_attribute(BotanuAttributes.TOOL_SUCCESS, success)
        if items_returned > 0:
            self.span.set_attribute(BotanuAttributes.TOOL_ITEMS_RETURNED, items_returned)
        if bytes_processed > 0:
            self.span.set_attribute(BotanuAttributes.TOOL_BYTES_PROCESSED, bytes_processed)
        return self

    def set_tool_call_id(self, tool_call_id: str) -> ToolTracker:
        """Set the tool call ID from the LLM response."""
        self.tool_call_id = tool_call_id
        if self.span is None:
            return self
        self.span.set_attribute(GenAIAttributes.TOOL_CALL_ID, tool_call_id)
        return self

    def set_error(self, error: Exception) -> ToolTracker:
        """Record tool execution error (also flips ``success`` to False)."""
        self.success = False
        self.error_type = type(error).__name__
        if self.span is None:
            return self
        self.span.set_status(Status(StatusCode.ERROR, str(error)))
        self.span.set_attribute(GenAIAttributes.ERROR_TYPE, self.error_type)
        self.span.record_exception(error)
        return self

    def add_metadata(self, **kwargs: Any) -> ToolTracker:
        """Add custom metadata (keys auto-prefixed with ``botanu.tool.``)."""
        if self.span is None:
            return self
        for key, value in kwargs.items():
            if key.startswith(("botanu.", "gen_ai.")):
                self.span.set_attribute(key, value)
            else:
                self.span.set_attribute(f"botanu.tool.{key}", value)
        return self

    def _finalize(self) -> None:
        """Stamp the duration attribute and emit tool metrics; runs once on exit."""
        if self.span is None:
            return

        elapsed = (datetime.now(timezone.utc) - self.start_time).total_seconds()
        self.span.set_attribute(BotanuAttributes.TOOL_DURATION_MS, elapsed * 1000)

        labels: Dict[str, str] = {
            GenAIAttributes.TOOL_NAME: self.tool_name,
            "status": "error" if self.error_type else "success",
        }
        if self.vendor:
            labels[GenAIAttributes.PROVIDER_NAME] = self.vendor

        _tool_duration_histogram.record(elapsed, labels)
        _tool_counter.add(1, labels)
|
|
544
|
+
|
|
545
|
+
|
|
546
|
+
@contextmanager
def track_tool_call(
    tool_name: str,
    tool_call_id: Optional[str] = None,
    vendor: Optional[str] = None,
    **kwargs: Any,
) -> Generator[ToolTracker, None, None]:
    """Context manager tracking one tool/function call on an INTERNAL span.

    Args:
        tool_name: Name of the tool/function.
        tool_call_id: Tool call ID from the LLM response.
        vendor: Tool vendor if external (e.g., ``"tavily"``).
        **kwargs: Additional span attributes (keys auto-prefixed ``botanu.tool.``).

    Yields:
        :class:`ToolTracker` instance.
    """
    tracer = trace.get_tracer("botanu.gen_ai")

    with tracer.start_as_current_span(
        name=f"execute_tool {tool_name}", kind=SpanKind.INTERNAL
    ) as span:
        span.set_attribute(GenAIAttributes.OPERATION_NAME, ModelOperation.EXECUTE_TOOL)
        span.set_attribute(GenAIAttributes.TOOL_NAME, tool_name)

        if tool_call_id:
            span.set_attribute(GenAIAttributes.TOOL_CALL_ID, tool_call_id)
        if vendor:
            provider = LLM_VENDORS.get(vendor.lower(), vendor.lower())
            span.set_attribute(GenAIAttributes.PROVIDER_NAME, provider)
            span.set_attribute(BotanuAttributes.VENDOR, provider)

        for key, value in kwargs.items():
            if key.startswith(("botanu.", "gen_ai.")):
                span.set_attribute(key, value)
            else:
                span.set_attribute(f"botanu.tool.{key}", value)

        tracker = ToolTracker(
            tool_name=tool_name,
            tool_call_id=tool_call_id,
            vendor=vendor,
            span=span,
        )

        try:
            yield tracker
        except Exception as exc:
            tracker.set_error(exc)
            raise
        finally:
            tracker._finalize()
|
|
596
|
+
|
|
597
|
+
|
|
598
|
+
# =========================================================================
|
|
599
|
+
# Standalone Helpers
|
|
600
|
+
# =========================================================================
|
|
601
|
+
|
|
602
|
+
|
|
603
|
+
def set_llm_attributes(
    vendor: str,
    model: str,
    operation: str = ModelOperation.CHAT,
    input_tokens: int = 0,
    output_tokens: int = 0,
    cached_tokens: int = 0,
    streaming: bool = False,
    vendor_request_id: Optional[str] = None,
    span: Optional[Span] = None,
) -> None:
    """Stamp OTel GenAI attributes (and token metrics) onto a span.

    Uses *span* when given, otherwise the current span; silently does
    nothing when that span is missing or not recording.
    """
    target = span or trace.get_current_span()
    if not target or not target.is_recording():
        return

    provider = LLM_VENDORS.get(vendor.lower(), vendor.lower())

    target.set_attribute(GenAIAttributes.OPERATION_NAME, operation)
    target.set_attribute(GenAIAttributes.PROVIDER_NAME, provider)
    target.set_attribute(GenAIAttributes.REQUEST_MODEL, model)
    target.set_attribute(BotanuAttributes.VENDOR, provider)

    # Only non-trivial values are recorded as attributes.
    if input_tokens > 0:
        target.set_attribute(GenAIAttributes.USAGE_INPUT_TOKENS, input_tokens)
    if output_tokens > 0:
        target.set_attribute(GenAIAttributes.USAGE_OUTPUT_TOKENS, output_tokens)
    if cached_tokens > 0:
        target.set_attribute(BotanuAttributes.TOKENS_CACHED, cached_tokens)
    if streaming:
        target.set_attribute(BotanuAttributes.STREAMING, True)
    if vendor_request_id:
        target.set_attribute(GenAIAttributes.RESPONSE_ID, vendor_request_id)
        target.set_attribute(BotanuAttributes.VENDOR_REQUEST_ID, vendor_request_id)

    _record_token_metrics(
        vendor=provider,
        model=model,
        operation=operation,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
    )
|
|
645
|
+
|
|
646
|
+
|
|
647
|
+
def set_token_usage(
    input_tokens: int,
    output_tokens: int,
    cached_tokens: int = 0,
    span: Optional[Span] = None,
) -> None:
    """Set GenAI token-usage attributes on *span* (or the current span).

    A no-op when the target span is missing or not recording.
    """
    target = span or trace.get_current_span()
    if not target or not target.is_recording():
        return

    target.set_attribute(GenAIAttributes.USAGE_INPUT_TOKENS, input_tokens)
    target.set_attribute(GenAIAttributes.USAGE_OUTPUT_TOKENS, output_tokens)
    if cached_tokens > 0:
        target.set_attribute(BotanuAttributes.TOKENS_CACHED, cached_tokens)
|
|
663
|
+
|
|
664
|
+
|
|
665
|
+
def llm_instrumented(
    vendor: str,
    model_param: str = "model",
    tokens_from_response: bool = True,
) -> Any:
    """Decorator to auto-instrument LLM client methods.

    Wraps the callable in :func:`track_llm_call`, marks streaming requests
    (``stream=`` kwarg), and extracts token counts from ``response.usage``.

    Args:
        vendor: LLM vendor name.
        model_param: Name of the parameter containing the model name.
        tokens_from_response: Whether to extract tokens from ``response.usage``.
    """

    def decorator(func: Any) -> Any:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Model name: keyword argument first, then the second positional
            # (the first is usually ``self`` on client methods), else a
            # placeholder so the span still gets created.
            model = kwargs.get(model_param) or (args[1] if len(args) > 1 else "unknown")

            with track_llm_call(vendor, model) as tracker:
                if kwargs.get("stream"):
                    tracker.set_streaming(True)

                response = func(*args, **kwargs)

                if tokens_from_response and hasattr(response, "usage"):
                    usage = response.usage
                    # Some SDKs report usage fields as None rather than
                    # omitting them; coerce to 0 so downstream metric
                    # comparisons (e.g. ``input_tokens > 0``) cannot raise
                    # TypeError on a None value.
                    tracker.set_tokens(
                        input_tokens=(
                            getattr(usage, "prompt_tokens", None)
                            or getattr(usage, "input_tokens", None)
                            or 0
                        ),
                        output_tokens=(
                            getattr(usage, "completion_tokens", None)
                            or getattr(usage, "output_tokens", None)
                            or 0
                        ),
                    )

                return response

        return wrapper

    return decorator
|