openlit 1.32.12__py3-none-any.whl → 1.33.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- openlit/__init__.py +3 -1
- openlit/instrumentation/together/__init__.py +70 -0
- openlit/instrumentation/together/async_together.py +558 -0
- openlit/instrumentation/together/together.py +558 -0
- openlit/semcov/__init__.py +1 -0
- {openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/METADATA +4 -3
- {openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/RECORD +9 -6
- {openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/LICENSE +0 -0
- {openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/WHEEL +0 -0
openlit/__init__.py CHANGED

```diff
@@ -52,6 +52,7 @@ from openlit.instrumentation.milvus import MilvusInstrumentor
 from openlit.instrumentation.astra import AstraInstrumentor
 from openlit.instrumentation.transformers import TransformersInstrumentor
 from openlit.instrumentation.litellm import LiteLLMInstrumentor
+from openlit.instrumentation.together import TogetherInstrumentor
 from openlit.instrumentation.crewai import CrewAIInstrumentor
 from openlit.instrumentation.ag2 import AG2Instrumentor
 from openlit.instrumentation.multion import MultiOnInstrumentor
@@ -188,7 +189,6 @@ def instrument_if_available(
             metrics_dict=config.metrics_dict,
             disable_metrics=config.disable_metrics,
         )
-        logger.info("Instrumented %s", instrumentor_name)
     else:
         # pylint: disable=line-too-long
         logger.info("Library for %s (%s) not found. Skipping instrumentation", instrumentor_name, module_name)
@@ -264,6 +264,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "crawl4ai": "crawl4ai",
         "firecrawl": "firecrawl",
         "letta": "letta",
+        "together": "together",
     }
 
     invalid_instrumentors = [
@@ -360,6 +361,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "crawl4ai": Crawl4AIInstrumentor(),
         "firecrawl": FireCrawlInstrumentor(),
         "letta": LettaInstrumentor(),
+        "together": TogetherInstrumentor(),
     }
 
     # Initialize and instrument only the enabled instrumentors
```
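The net effect of these three hunks is that Together joins the standard instrumentor registry, so it is enabled by `openlit.init()` like any other provider. A minimal usage sketch under that assumption (the model name, prompt, and environment values below are illustrative, not part of this diff):

```python
import openlit
from together import Together

# One call wires up tracing and metrics for every detected library,
# now including the together SDK added in this release.
openlit.init(environment="production", application_name="chat-demo")

client = Together()  # reads TOGETHER_API_KEY from the environment
response = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```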
openlit/instrumentation/together/__init__.py ADDED (+70 lines)

```python
# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
"""Initializer of Auto Instrumentation of Together AI Functions"""

from typing import Collection
import importlib.metadata
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from wrapt import wrap_function_wrapper

from openlit.instrumentation.together.together import (
    completion, image_generate
)
from openlit.instrumentation.together.async_together import (
    async_completion, async_image_generate
)

_instruments = ("together >= 1.3.5",)

class TogetherInstrumentor(BaseInstrumentor):
    """
    An instrumentor for Together's client library.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        application_name = kwargs.get("application_name", "default_application")
        environment = kwargs.get("environment", "default_environment")
        tracer = kwargs.get("tracer")
        metrics = kwargs.get("metrics_dict")
        pricing_info = kwargs.get("pricing_info", {})
        trace_content = kwargs.get("trace_content", False)
        disable_metrics = kwargs.get("disable_metrics")
        version = importlib.metadata.version("together")

        # Sync chat completions
        wrap_function_wrapper(
            "together.resources.chat.completions",
            "ChatCompletions.create",
            completion("together.chat.completions", version, environment, application_name,
                       tracer, pricing_info, trace_content, metrics, disable_metrics),
        )

        # Sync image generate
        wrap_function_wrapper(
            "together.resources.images",
            "Images.generate",
            image_generate("together.image.generate", version, environment, application_name,
                           tracer, pricing_info, trace_content, metrics, disable_metrics),
        )

        # Async chat completions
        wrap_function_wrapper(
            "together.resources.chat.completions",
            "AsyncChatCompletions.create",
            async_completion("together.chat.completions", version, environment, application_name,
                             tracer, pricing_info, trace_content, metrics, disable_metrics),
        )

        # Async image generate
        wrap_function_wrapper(
            "together.resources.images",
            "AsyncImages.generate",
            async_image_generate("together.image.generate", version, environment, application_name,
                                 tracer, pricing_info, trace_content, metrics, disable_metrics),
        )

    def _uninstrument(self, **kwargs):
        # Proper uninstrumentation logic to revert patched methods
        pass
```
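Each `wrap_function_wrapper` call above patches a method in place with a wrapt proxy whose wrapper receives `(wrapped, instance, args, kwargs)`, the same signature the `completion`/`image_generate` factories close over. A self-contained sketch of that mechanism on an unrelated target (`json.dumps` is just a convenient stand-in, not something this package patches):

```python
import json
import time
from wrapt import wrap_function_wrapper

def timed(wrapped, instance, args, kwargs):
    # wrapt hands us the original callable plus the call's arguments,
    # so telemetry can be recorded around the real invocation.
    start = time.monotonic()
    try:
        return wrapped(*args, **kwargs)
    finally:
        print(f"{wrapped.__name__} took {time.monotonic() - start:.6f}s")

# Replaces json.dumps process-wide; the same pattern is used for
# ChatCompletions.create and Images.generate above.
wrap_function_wrapper("json", "dumps", timed)
json.dumps({"hello": "world"})
```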
openlit/instrumentation/together/async_together.py ADDED (+558 lines)

```python
# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, too-many-branches, too-many-instance-attributes
"""
Module for monitoring Together calls.
"""

import logging
from opentelemetry.trace import SpanKind, Status, StatusCode
from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
from openlit.__helpers import (
    get_chat_model_cost,
    get_image_model_cost,
    handle_exception,
    response_as_dict,
)
from openlit.semcov import SemanticConvetion

# Initialize logger for logging potential issues and operations
logger = logging.getLogger(__name__)

def async_completion(gen_ai_endpoint, version, environment, application_name,
                     tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for chat completions to collect metrics.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Together AI SDK.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Together AI usage.
        trace_content: Flag indicating whether to trace the actual content.

    Returns:
        A function that wraps the chat completions method to add telemetry.
    """

    class TracedAsyncStream:
        """
        Wrapper for streaming responses to collect metrics and trace data.

        This class implements the '__aiter__' and '__anext__' methods that
        handle asynchronous streaming responses.

        This class also implements '__aenter__' and '__aexit__' methods that
        handle the asynchronous context management protocol.
        """
        def __init__(
                self,
                wrapped,
                span,
                kwargs,
                **args,
            ):
            self.__wrapped__ = wrapped
            self._span = span
            # Placeholder for aggregating streaming response
            self._llmresponse = ""
            self._response_id = ""
            self._prompt_tokens = 0
            self._completion_tokens = 0
            self._total_tokens = 0

            self._args = args
            self._kwargs = kwargs

        async def __aenter__(self):
            await self.__wrapped__.__aenter__()
            return self

        async def __aexit__(self, exc_type, exc_value, traceback):
            await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)

        def __aiter__(self):
            return self

        async def __getattr__(self, name):
            """Delegate attribute access to the wrapped object."""
            return getattr(await self.__wrapped__, name)

        async def __anext__(self):
            try:
                chunk = await self.__wrapped__.__anext__()
                chunked = response_as_dict(chunk)
                # Collect message IDs and aggregated response from events
                if (len(chunked.get('choices')) > 0 and ('delta' in chunked.get('choices')[0] and
                        'content' in chunked.get('choices')[0].get('delta'))):

                    content = chunked.get('choices')[0].get('delta').get('content')
                    if content:
                        self._llmresponse += content
                if chunked.get("usage"):
                    self._prompt_tokens = chunked.get("usage").get("prompt_tokens")
                    self._completion_tokens = chunked.get("usage").get("completion_tokens")
                    self._total_tokens = chunked.get("usage").get("total_tokens")
                self._response_id = chunked.get('id')
                return chunk
            except StopAsyncIteration:
                # Handling the exception ensures observability without disrupting operation
                try:
                    # Format 'messages' into a single string
                    message_prompt = self._kwargs.get("messages", "")
                    formatted_messages = []
                    for message in message_prompt:
                        role = message["role"]
                        content = message["content"]

                        if isinstance(content, list):
                            content_str = ", ".join(
                                # pylint: disable=line-too-long
                                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                                if "type" in item else f'text: {item["text"]}'
                                for item in content
                            )
                            formatted_messages.append(f"{role}: {content_str}")
                        else:
                            formatted_messages.append(f"{role}: {content}")
                    prompt = "\n".join(formatted_messages)

                    # Calculate cost of the operation
                    cost = get_chat_model_cost(self._kwargs.get(
                        "model",
                        "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                    ),
                        pricing_info, self._prompt_tokens,
                        self._completion_tokens)

                    # Set Span attributes
                    self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                             SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                             SemanticConvetion.GEN_AI_TYPE_CHAT)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                             gen_ai_endpoint)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                             self._response_id)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                             environment)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                             application_name)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                             self._kwargs.get(
                                                 "model",
                                                 "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                                             ))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
                                             self._kwargs.get("user", ""))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                             self._kwargs.get("top_p", 1.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                             self._kwargs.get("max_tokens", -1))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                             self._kwargs.get("temperature", 1.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
                                             self._kwargs.get("presence_penalty", 0.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
                                             self._kwargs.get("frequency_penalty", 0.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
                                             self._kwargs.get("seed", ""))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                             True)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                             self._prompt_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                             self._completion_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                             self._total_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                             cost)
                    if trace_content:
                        self._span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                            },
                        )
                        self._span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
                            },
                        )

                    self._span.set_status(Status(StatusCode.OK))

                    if disable_metrics is False:
                        attributes = {
                            TELEMETRY_SDK_NAME:
                                "openlit",
                            SemanticConvetion.GEN_AI_APPLICATION_NAME:
                                application_name,
                            SemanticConvetion.GEN_AI_SYSTEM:
                                SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                            SemanticConvetion.GEN_AI_ENVIRONMENT:
                                environment,
                            SemanticConvetion.GEN_AI_TYPE:
                                SemanticConvetion.GEN_AI_TYPE_CHAT,
                            SemanticConvetion.GEN_AI_REQUEST_MODEL:
                                self._kwargs.get("model",
                                                 "meta-llama/Llama-3.3-70B-Instruct-Turbo")
                        }

                        metrics["genai_requests"].add(1, attributes)
                        metrics["genai_total_tokens"].add(
                            self._total_tokens, attributes
                        )
                        metrics["genai_completion_tokens"].add(
                            self._completion_tokens, attributes
                        )
                        metrics["genai_prompt_tokens"].add(
                            self._prompt_tokens, attributes
                        )
                        metrics["genai_cost"].record(cost, attributes)

                except Exception as e:
                    handle_exception(self._span, e)
                    logger.error("Error in trace creation: %s", e)
                finally:
                    self._span.end()
                raise

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the 'chat.completions' API call to add telemetry.

        This collects metrics such as execution time, cost, and token usage, and handles errors
        gracefully, adding details to the trace for observability.

        Args:
            wrapped: The original 'chat.completions' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'chat.completions' method.
            kwargs: Keyword arguments for the 'chat.completions' method.

        Returns:
            The response from the original 'chat.completions' method.
        """

        # Check if streaming is enabled for the API call
        streaming = kwargs.get("stream", False)

        # pylint: disable=no-else-return
        if streaming:
            # Special handling for streaming response to accommodate the nature of data flow
            awaited_wrapped = await wrapped(*args, **kwargs)
            span = tracer.start_span(gen_ai_endpoint, kind=SpanKind.CLIENT)

            return TracedAsyncStream(awaited_wrapped, span, kwargs)

        # Handling for non-streaming responses
        else:
            # pylint: disable=line-too-long
            with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
                response = await wrapped(*args, **kwargs)

                response_dict = response_as_dict(response)

                try:
                    # Format 'messages' into a single string
                    message_prompt = kwargs.get("messages", "")
                    formatted_messages = []
                    for message in message_prompt:
                        role = message["role"]
                        content = message["content"]

                        if isinstance(content, list):
                            content_str = ", ".join(
                                # pylint: disable=line-too-long
                                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                                if "type" in item else f'text: {item["text"]}'
                                for item in content
                            )
                            formatted_messages.append(f"{role}: {content_str}")
                        else:
                            formatted_messages.append(f"{role}: {content}")
                    prompt = "\n".join(formatted_messages)

                    # Set base span attributes
                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                       SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                       SemanticConvetion.GEN_AI_TYPE_CHAT)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                       gen_ai_endpoint)
                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                       response_dict.get("id"))
                    span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                       environment)
                    span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                       application_name)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                       kwargs.get("model",
                                                  "meta-llama/Llama-3.3-70B-Instruct-Turbo"))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                       kwargs.get("top_p", 1.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                       kwargs.get("max_tokens", -1))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
                                       kwargs.get("user", ""))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                       kwargs.get("temperature", 1.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
                                       kwargs.get("presence_penalty", 0.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
                                       kwargs.get("frequency_penalty", 0.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
                                       kwargs.get("seed", ""))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                       False)
                    if trace_content:
                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                            },
                        )

                    # Set span attributes when tools is not passed to the function call
                    if "tools" not in kwargs:
                        # Calculate cost of the operation
                        cost = get_chat_model_cost(kwargs.get(
                            "model",
                            "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                        ),
                            pricing_info,
                            response_dict.get('usage', {}).get('prompt_tokens', None),
                            response_dict.get('usage', {}).get('completion_tokens', None))

                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                           response_dict.get('usage', {}).get('prompt_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                           response_dict.get('usage', {}).get('completion_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                           response_dict.get('usage', {}).get('total_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                           cost)

                        # Set span attributes for when n = 1 (default)
                        if "n" not in kwargs or kwargs["n"] == 1:
                            if trace_content:
                                span.add_event(
                                    name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                                    attributes={
                                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response_dict.get('choices', [])[0].get("message").get("content"),
                                    },
                                )

                        # Set span attributes for when n > 1
                        else:
                            i = 0
                            while i < kwargs["n"] and trace_content is True:
                                attribute_name = f"gen_ai.content.completion.{i}"
                                span.add_event(
                                    name=attribute_name,
                                    attributes={
                                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response_dict.get('choices')[i].get("message").get("content"),
                                    },
                                )
                                i += 1

                        # Return original response
                        return response

                    # Set span attributes when tools is passed to the function call
                    elif "tools" in kwargs:
                        # Calculate cost of the operation
                        cost = get_chat_model_cost(kwargs.get(
                            "model",
                            "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                        ),
                            pricing_info,
                            response_dict.get('usage').get('prompt_tokens'),
                            response_dict.get('usage').get('completion_tokens'))

                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
                            },
                        )
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                           response_dict.get('usage').get('prompt_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                           response_dict.get('usage').get('completion_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                           response_dict.get('usage').get('total_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                           cost)

                    span.set_status(Status(StatusCode.OK))

                    if disable_metrics is False:
                        attributes = {
                            TELEMETRY_SDK_NAME:
                                "openlit",
                            SemanticConvetion.GEN_AI_APPLICATION_NAME:
                                application_name,
                            SemanticConvetion.GEN_AI_SYSTEM:
                                SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                            SemanticConvetion.GEN_AI_ENVIRONMENT:
                                environment,
                            SemanticConvetion.GEN_AI_TYPE:
                                SemanticConvetion.GEN_AI_TYPE_CHAT,
                            SemanticConvetion.GEN_AI_REQUEST_MODEL:
                                kwargs.get("model", "meta-llama/Llama-3.3-70B-Instruct-Turbo")
                        }

                        metrics["genai_requests"].add(1, attributes)
                        metrics["genai_total_tokens"].add(
                            response_dict.get('usage').get('total_tokens'), attributes)
                        metrics["genai_completion_tokens"].add(
                            response_dict.get('usage').get('completion_tokens'), attributes)
                        metrics["genai_prompt_tokens"].add(
                            response_dict.get('usage').get('prompt_tokens'), attributes)
                        metrics["genai_cost"].record(cost, attributes)

                    # Return original response
                    return response

                except Exception as e:
                    handle_exception(span, e)
                    logger.error("Error in trace creation: %s", e)

                    # Return original response
                    return response

    return wrapper

def async_image_generate(gen_ai_endpoint, version, environment, application_name,
                         tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for image generation to collect metrics.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Together API.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Together image generation.
        trace_content: Flag indicating whether to trace the input prompt and generated images.

    Returns:
        A function that wraps the image generation method to add telemetry.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the 'images.generate' API call to add telemetry.

        This collects metrics such as execution time, cost, and handles errors
        gracefully, adding details to the trace for observability.

        Args:
            wrapped: The original 'images.generate' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'images.generate' method.
            kwargs: Keyword arguments for the 'images.generate' method.

        Returns:
            The response from the original 'images.generate' method.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = await wrapped(*args, **kwargs)
            images_count = 0

            try:
                # Find Image format
                if "response_format" in kwargs and kwargs["response_format"] == "b64_json":
                    image = "b64_json"
                else:
                    image = "url"

                # Calculate cost of the operation
                image_size = str(kwargs.get("width", 1024)) + "x" + str(kwargs.get("height", 1024))
                cost_per_million = get_image_model_cost(kwargs.get(
                    "model", "black-forest-labs/FLUX.1-dev"
                ),
                    pricing_info, "1000000",
                    kwargs.get("quality", "standard"))
                pixels = kwargs.get("width", 1024) * kwargs.get("height", 1024)
                cost = pixels / 1_000_000 * cost_per_million

                for items in response.data:
                    # Set Span attributes
                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                       SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                       SemanticConvetion.GEN_AI_TYPE_IMAGE)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                       gen_ai_endpoint)
                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                       response.id)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                       environment)
                    span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                       application_name)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                       kwargs.get("model", "black-forest-labs/FLUX.1-dev"))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_SIZE,
                                       image_size)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_QUALITY,
                                       kwargs.get("quality", "standard"))
                    if trace_content:
                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
                            },
                        )
                        attribute_name = f"gen_ai.response.image.{images_count}"
                        span.add_event(
                            name=attribute_name,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
                            },
                        )

                    images_count += 1

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                   len(response.data) * cost)
                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME:
                            "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
                            application_name,
                        SemanticConvetion.GEN_AI_SYSTEM:
                            SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                        SemanticConvetion.GEN_AI_ENVIRONMENT:
                            environment,
                        SemanticConvetion.GEN_AI_TYPE:
                            SemanticConvetion.GEN_AI_TYPE_IMAGE,
                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
                            kwargs.get("model", "black-forest-labs/FLUX.1-dev")
                    }

                    metrics["genai_requests"].add(1, attributes)
                    metrics["genai_cost"].record(cost, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
```
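The image-cost arithmetic in `async_image_generate` charges per megapixel: `pixels / 1_000_000 * cost_per_million`, and the span's cost attribute multiplies that by the number of returned images. A worked example with an illustrative rate (the price below is made up; real rates come from `pricing_info`):

```python
# Hypothetical: 2 images at the default 1024x1024 size
width, height, n_images = 1024, 1024, 2
cost_per_million = 0.025                 # illustrative $ per megapixel

pixels = width * height                  # 1_048_576
cost_per_image = pixels / 1_000_000 * cost_per_million
total = n_images * cost_per_image
print(f"{cost_per_image:.6f} per image, {total:.6f} total")
# 0.026214 per image, 0.052429 total
```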
openlit/instrumentation/together/together.py ADDED (+558 lines)

```python
# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, too-many-branches, too-many-instance-attributes
"""
Module for monitoring Together calls.
"""

import logging
from opentelemetry.trace import SpanKind, Status, StatusCode
from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
from openlit.__helpers import (
    get_chat_model_cost,
    get_image_model_cost,
    handle_exception,
    response_as_dict,
)
from openlit.semcov import SemanticConvetion

# Initialize logger for logging potential issues and operations
logger = logging.getLogger(__name__)

def completion(gen_ai_endpoint, version, environment, application_name,
               tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for chat completions to collect metrics.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Together AI SDK.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Together AI usage.
        trace_content: Flag indicating whether to trace the actual content.

    Returns:
        A function that wraps the chat completions method to add telemetry.
    """

    class TracedSyncStream:
        """
        Wrapper for streaming responses to collect metrics and trace data.

        This class implements the '__iter__' and '__next__' methods that
        handle synchronous streaming responses.

        This class also implements '__enter__' and '__exit__' methods that
        handle the context management protocol.
        """
        def __init__(
                self,
                wrapped,
                span,
                kwargs,
                **args,
            ):
            self.__wrapped__ = wrapped
            self._span = span
            # Placeholder for aggregating streaming response
            self._llmresponse = ""
            self._response_id = ""
            self._prompt_tokens = 0
            self._completion_tokens = 0
            self._total_tokens = 0

            self._args = args
            self._kwargs = kwargs

        def __enter__(self):
            self.__wrapped__.__enter__()
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.__wrapped__.__exit__(exc_type, exc_value, traceback)

        def __iter__(self):
            return self

        def __getattr__(self, name):
            """Delegate attribute access to the wrapped object."""
            return getattr(self.__wrapped__, name)

        def __next__(self):
            try:
                chunk = self.__wrapped__.__next__()
                chunked = response_as_dict(chunk)
                # Collect message IDs and aggregated response from events
                if (len(chunked.get('choices')) > 0 and ('delta' in chunked.get('choices')[0] and
                        'content' in chunked.get('choices')[0].get('delta'))):

                    content = chunked.get('choices')[0].get('delta').get('content')
                    if content:
                        self._llmresponse += content
                if chunked.get("usage"):
                    self._prompt_tokens = chunked.get("usage").get("prompt_tokens")
                    self._completion_tokens = chunked.get("usage").get("completion_tokens")
                    self._total_tokens = chunked.get("usage").get("total_tokens")
                self._response_id = chunked.get('id')
                return chunk
            except StopIteration:
                # Handling the exception ensures observability without disrupting operation
                try:
                    # Format 'messages' into a single string
                    message_prompt = self._kwargs.get("messages", "")
                    formatted_messages = []
                    for message in message_prompt:
                        role = message["role"]
                        content = message["content"]

                        if isinstance(content, list):
                            content_str = ", ".join(
                                # pylint: disable=line-too-long
                                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                                if "type" in item else f'text: {item["text"]}'
                                for item in content
                            )
                            formatted_messages.append(f"{role}: {content_str}")
                        else:
                            formatted_messages.append(f"{role}: {content}")
                    prompt = "\n".join(formatted_messages)

                    # Calculate cost of the operation
                    cost = get_chat_model_cost(self._kwargs.get(
                        "model",
                        "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                    ),
                        pricing_info, self._prompt_tokens,
                        self._completion_tokens)

                    # Set Span attributes
                    self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                             SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                             SemanticConvetion.GEN_AI_TYPE_CHAT)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                             gen_ai_endpoint)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                             self._response_id)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                             environment)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                             application_name)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                             self._kwargs.get(
                                                 "model",
                                                 "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                                             ))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
                                             self._kwargs.get("user", ""))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                             self._kwargs.get("top_p", 1.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                             self._kwargs.get("max_tokens", -1))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                             self._kwargs.get("temperature", 1.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
                                             self._kwargs.get("presence_penalty", 0.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
                                             self._kwargs.get("frequency_penalty", 0.0))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
                                             self._kwargs.get("seed", ""))
                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                             True)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                             self._prompt_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                             self._completion_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                             self._total_tokens)
                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                             cost)
                    if trace_content:
                        self._span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                            },
                        )
                        self._span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
                            },
                        )

                    self._span.set_status(Status(StatusCode.OK))

                    if disable_metrics is False:
                        attributes = {
                            TELEMETRY_SDK_NAME:
                                "openlit",
                            SemanticConvetion.GEN_AI_APPLICATION_NAME:
                                application_name,
                            SemanticConvetion.GEN_AI_SYSTEM:
                                SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                            SemanticConvetion.GEN_AI_ENVIRONMENT:
                                environment,
                            SemanticConvetion.GEN_AI_TYPE:
                                SemanticConvetion.GEN_AI_TYPE_CHAT,
                            SemanticConvetion.GEN_AI_REQUEST_MODEL:
                                self._kwargs.get("model",
                                                 "meta-llama/Llama-3.3-70B-Instruct-Turbo")
                        }

                        metrics["genai_requests"].add(1, attributes)
                        metrics["genai_total_tokens"].add(
                            self._total_tokens, attributes
                        )
                        metrics["genai_completion_tokens"].add(
                            self._completion_tokens, attributes
                        )
                        metrics["genai_prompt_tokens"].add(
                            self._prompt_tokens, attributes
                        )
                        metrics["genai_cost"].record(cost, attributes)

                except Exception as e:
                    handle_exception(self._span, e)
                    logger.error("Error in trace creation: %s", e)
                finally:
                    self._span.end()
                raise

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the 'chat.completions' API call to add telemetry.

        This collects metrics such as execution time, cost, and token usage, and handles errors
        gracefully, adding details to the trace for observability.

        Args:
            wrapped: The original 'chat.completions' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'chat.completions' method.
            kwargs: Keyword arguments for the 'chat.completions' method.

        Returns:
            The response from the original 'chat.completions' method.
        """

        # Check if streaming is enabled for the API call
        streaming = kwargs.get("stream", False)

        # pylint: disable=no-else-return
        if streaming:
            # Special handling for streaming response to accommodate the nature of data flow
            awaited_wrapped = wrapped(*args, **kwargs)
            span = tracer.start_span(gen_ai_endpoint, kind=SpanKind.CLIENT)

            return TracedSyncStream(awaited_wrapped, span, kwargs)

        # Handling for non-streaming responses
        else:
            # pylint: disable=line-too-long
            with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
                response = wrapped(*args, **kwargs)

                response_dict = response_as_dict(response)

                try:
                    # Format 'messages' into a single string
                    message_prompt = kwargs.get("messages", "")
                    formatted_messages = []
                    for message in message_prompt:
                        role = message["role"]
                        content = message["content"]

                        if isinstance(content, list):
                            content_str = ", ".join(
                                # pylint: disable=line-too-long
                                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                                if "type" in item else f'text: {item["text"]}'
                                for item in content
                            )
                            formatted_messages.append(f"{role}: {content_str}")
                        else:
                            formatted_messages.append(f"{role}: {content}")
                    prompt = "\n".join(formatted_messages)

                    # Set base span attributes
                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                       SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                       SemanticConvetion.GEN_AI_TYPE_CHAT)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                       gen_ai_endpoint)
                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                       response_dict.get("id"))
                    span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                       environment)
                    span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                       application_name)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                       kwargs.get("model",
                                                  "meta-llama/Llama-3.3-70B-Instruct-Turbo"))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                       kwargs.get("top_p", 1.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                       kwargs.get("max_tokens", -1))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
                                       kwargs.get("user", ""))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                       kwargs.get("temperature", 1.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
                                       kwargs.get("presence_penalty", 0.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
                                       kwargs.get("frequency_penalty", 0.0))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
                                       kwargs.get("seed", ""))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                       False)
                    if trace_content:
                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                            },
                        )

                    # Set span attributes when tools is not passed to the function call
                    if "tools" not in kwargs:
                        # Calculate cost of the operation
                        cost = get_chat_model_cost(kwargs.get(
                            "model",
                            "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                        ),
                            pricing_info,
                            response_dict.get('usage', {}).get('prompt_tokens', None),
                            response_dict.get('usage', {}).get('completion_tokens', None))

                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                           response_dict.get('usage', {}).get('prompt_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                           response_dict.get('usage', {}).get('completion_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                           response_dict.get('usage', {}).get('total_tokens', None))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                           cost)

                        # Set span attributes for when n = 1 (default)
                        if "n" not in kwargs or kwargs["n"] == 1:
                            if trace_content:
                                span.add_event(
                                    name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                                    attributes={
                                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response_dict.get('choices', [])[0].get("message").get("content"),
                                    },
                                )

                        # Set span attributes for when n > 1
                        else:
                            i = 0
                            while i < kwargs["n"] and trace_content is True:
                                attribute_name = f"gen_ai.content.completion.{i}"
                                span.add_event(
                                    name=attribute_name,
                                    attributes={
                                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response_dict.get('choices')[i].get("message").get("content"),
                                    },
                                )
                                i += 1

                        # Return original response
                        return response

                    # Set span attributes when tools is passed to the function call
                    elif "tools" in kwargs:
                        # Calculate cost of the operation
                        cost = get_chat_model_cost(kwargs.get(
                            "model",
                            "meta-llama/Llama-3.3-70B-Instruct-Turbo"
                        ),
                            pricing_info,
                            response_dict.get('usage').get('prompt_tokens'),
                            response_dict.get('usage').get('completion_tokens'))

                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
                            },
                        )
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                           response_dict.get('usage').get('prompt_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                           response_dict.get('usage').get('completion_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                           response_dict.get('usage').get('total_tokens'))
                        span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                           cost)

                    span.set_status(Status(StatusCode.OK))

                    if disable_metrics is False:
                        attributes = {
                            TELEMETRY_SDK_NAME:
                                "openlit",
                            SemanticConvetion.GEN_AI_APPLICATION_NAME:
                                application_name,
                            SemanticConvetion.GEN_AI_SYSTEM:
                                SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                            SemanticConvetion.GEN_AI_ENVIRONMENT:
                                environment,
                            SemanticConvetion.GEN_AI_TYPE:
                                SemanticConvetion.GEN_AI_TYPE_CHAT,
                            SemanticConvetion.GEN_AI_REQUEST_MODEL:
                                kwargs.get("model", "meta-llama/Llama-3.3-70B-Instruct-Turbo")
                        }

                        metrics["genai_requests"].add(1, attributes)
                        metrics["genai_total_tokens"].add(
                            response_dict.get('usage').get('total_tokens'), attributes)
                        metrics["genai_completion_tokens"].add(
                            response_dict.get('usage').get('completion_tokens'), attributes)
                        metrics["genai_prompt_tokens"].add(
                            response_dict.get('usage').get('prompt_tokens'), attributes)
                        metrics["genai_cost"].record(cost, attributes)

                    # Return original response
                    return response

                except Exception as e:
                    handle_exception(span, e)
                    logger.error("Error in trace creation: %s", e)

                    # Return original response
                    return response

    return wrapper

def image_generate(gen_ai_endpoint, version, environment, application_name,
                   tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for image generation to collect metrics.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Together API.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Together image generation.
        trace_content: Flag indicating whether to trace the input prompt and generated images.

    Returns:
        A function that wraps the image generation method to add telemetry.
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the 'images.generate' API call to add telemetry.

        This collects metrics such as execution time, cost, and handles errors
        gracefully, adding details to the trace for observability.

        Args:
            wrapped: The original 'images.generate' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'images.generate' method.
            kwargs: Keyword arguments for the 'images.generate' method.

        Returns:
            The response from the original 'images.generate' method.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = wrapped(*args, **kwargs)
            images_count = 0

            try:
                # Find Image format
                if "response_format" in kwargs and kwargs["response_format"] == "b64_json":
                    image = "b64_json"
                else:
                    image = "url"

                # Calculate cost of the operation
                image_size = str(kwargs.get("width", 1024)) + "x" + str(kwargs.get("height", 1024))
                cost_per_million = get_image_model_cost(kwargs.get(
                    "model", "black-forest-labs/FLUX.1-dev"
                ),
                    pricing_info, "1000000",
                    kwargs.get("quality", "standard"))
                pixels = kwargs.get("width", 1024) * kwargs.get("height", 1024)
                cost = pixels / 1_000_000 * cost_per_million

                for items in response.data:
                    # Set Span attributes
                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                       SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
                    span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                       SemanticConvetion.GEN_AI_TYPE_IMAGE)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                       gen_ai_endpoint)
                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
                                       response.id)
                    span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                       environment)
                    span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                       application_name)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                       kwargs.get("model", "black-forest-labs/FLUX.1-dev"))
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_SIZE,
                                       image_size)
                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_QUALITY,
                                       kwargs.get("quality", "standard"))
                    if trace_content:
                        span.add_event(
                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
                            },
                        )
                        attribute_name = f"gen_ai.response.image.{images_count}"
                        span.add_event(
                            name=attribute_name,
                            attributes={
                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
                            },
                        )

                    images_count += 1

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                   len(response.data) * cost)
                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME:
                            "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
                            application_name,
                        SemanticConvetion.GEN_AI_SYSTEM:
                            SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
                        SemanticConvetion.GEN_AI_ENVIRONMENT:
                            environment,
                        SemanticConvetion.GEN_AI_TYPE:
                            SemanticConvetion.GEN_AI_TYPE_IMAGE,
                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
                            kwargs.get("model", "black-forest-labs/FLUX.1-dev")
                    }

                    metrics["genai_requests"].add(1, attributes)
                    metrics["genai_cost"].record(cost, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
```
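Because `TracedSyncStream` only intercepts `__next__` and closes its span when the underlying stream raises `StopIteration`, calling code iterates exactly as it would over the unwrapped stream. A sketch of the consumer side once instrumentation is active (model and prompt are illustrative, and the chunk shape assumes Together's OpenAI-style streaming deltas, which is what the wrapper itself reads):

```python
from together import Together

client = Together()
stream = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
    messages=[{"role": "user", "content": "Write a haiku"}],
    stream=True,
)

# Each chunk passes through TracedSyncStream.__next__, which aggregates
# content and usage; the span ends when the stream is exhausted.
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```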
openlit/semcov/__init__.py CHANGED

```diff
@@ -134,6 +134,7 @@ class SemanticConvetion:
     GEN_AI_SYSTEM_CRAWL4AI = "crawl4ai"
     GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
     GEN_AI_SYSTEM_LETTA = "letta"
+    GEN_AI_SYSTEM_TOGETHER = "together"
 
     # Vector DB
     DB_OPERATION_API_ENDPOINT = "db.operation.api_endpoint"
```
{openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/METADATA CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.32.12
+Version: 1.33.1
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: anthropic (>=0.
+Requires-Dist: anthropic (>=0.42.0,<0.43.0)
 Requires-Dist: boto3 (>=1.34.0,<2.0.0)
 Requires-Dist: botocore (>=1.34.0,<2.0.0)
 Requires-Dist: openai (>=1.1.1,<2.0.0)
@@ -56,7 +56,7 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 
 ## ⚡ Features
 
-- 🔎 **Auto Instrumentation**: Works with
+- 🔎 **Auto Instrumentation**: Works with 50+ LLM providers, Agents, Vector databases, and GPUs with just one line of code.
 - 🔭 **OpenTelemetry-Native Observability SDKs**: Vendor-neutral SDKs that can send traces and metrics to your existing observability tool like Prometheus and Jaeger.
 - 💲 **Cost Tracking for Custom and Fine-Tuned Models**: Pass custom pricing files for accurate budgeting of custom and fine-tuned models.
 - 🚀 **Suppport for OpenLIT Features**: Includes suppprt for prompt management and secrets management features available in OpenLIT.
@@ -89,6 +89,7 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 | [✅ xAI](https://docs.openlit.io/latest/integrations/xai) | | | |
 | [✅ Prem AI](https://docs.openlit.io/latest/integrations/premai) | | | |
 | [✅ Assembly AI](https://docs.openlit.io/latest/integrations/assemblyai) | | | |
+| [✅ Together](https://docs.openlit.io/latest/integrations/together) | | | |
 
 ## Supported Destinations
 - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
```
{openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/RECORD CHANGED

```diff
@@ -1,5 +1,5 @@
 openlit/__helpers.py,sha256=bqMxdNndLW5NGO2wwpAoHEOnAFr_mhnmVLua3ifpSEc,6427
-openlit/__init__.py,sha256=
+openlit/__init__.py,sha256=CBo1-jSVFbyQ3dEeFog95Bhd8f2BZG8QoLsTtObfiks,21954
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
 openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154
@@ -103,6 +103,9 @@ openlit/instrumentation/qdrant/qdrant.py,sha256=K0cvEUbNx0hnk8AbEheYPSHcCgjFC482
 openlit/instrumentation/reka/__init__.py,sha256=X0zZ8Q18Z_6pIpksa7pdWldK4SKZM7U24zNc2UeRXC8,1870
 openlit/instrumentation/reka/async_reka.py,sha256=PDodlH_XycevE3k8u0drP7bokKtPDUcDfzfWRz6Fzt4,7439
 openlit/instrumentation/reka/reka.py,sha256=CL9uNX_tYjw2eetTxLKRNRQJ-OgI_e5YRz9iu9f_gP4,7421
+openlit/instrumentation/together/__init__.py,sha256=pg3gNqT4HjL3E-QHvAkM0UNdF3obii0HHp2xRx32gRc,2713
+openlit/instrumentation/together/async_together.py,sha256=HUO3lCheCq1o9wRzuL11_osVr_5U3Q5VGACIGirmwgg,29120
+openlit/instrumentation/together/together.py,sha256=XVeZWo6MbWGpbI705fKjyhQMF6lOnpa-jaYlTowcxOs,29024
 openlit/instrumentation/transformers/__init__.py,sha256=4GBtjzcJU4XiPexIUYEqF3pNZMeQw4Gm5B-cyumaFjs,1468
 openlit/instrumentation/transformers/transformers.py,sha256=MWEVkxHRWTHrpD85I1leksDIVtBiTtR5fQCO3Z62qb4,7875
 openlit/instrumentation/vertexai/__init__.py,sha256=N3E9HtzefD-zC0fvmfGYiDmSqssoavp_i59wfuYLyMw,6079
@@ -112,8 +115,8 @@ openlit/instrumentation/vllm/__init__.py,sha256=OVWalQ1dXvip1DUsjUGaHX4J-2FrSp-T
 openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOsOGN7Wd8,6527
 openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
 openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
-openlit/semcov/__init__.py,sha256=
-openlit-1.
-openlit-1.
-openlit-1.
-openlit-1.
+openlit/semcov/__init__.py,sha256=_kxniPeCdAYC_ZK982gqDR6RwgFCIK8xUPCzotwtt0k,10975
+openlit-1.33.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.33.1.dist-info/METADATA,sha256=4VS8HZHuX0icG9X31MErjvhQCnfcDRQgLQkh4H7hx6w,22964
+openlit-1.33.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+openlit-1.33.1.dist-info/RECORD,,
```
{openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/LICENSE: file without changes

{openlit-1.32.12.dist-info → openlit-1.33.1.dist-info}/WHEEL: file without changes