openlit 1.30.4__py3-none-any.whl → 1.31.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__init__.py +7 -0
- openlit/instrumentation/ag2/__init__.py +50 -0
- openlit/instrumentation/ag2/ag2.py +98 -0
- openlit/instrumentation/langchain/langchain.py +112 -86
- openlit/instrumentation/transformers/transformers.py +9 -3
- openlit/semcov/__init__.py +1 -0
- {openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/METADATA +5 -5
- {openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/RECORD +10 -8
- {openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/LICENSE +0 -0
- {openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/WHEEL +0 -0
openlit/__init__.py
CHANGED
@@ -48,6 +48,7 @@ from openlit.instrumentation.milvus import MilvusInstrumentor
 from openlit.instrumentation.transformers import TransformersInstrumentor
 from openlit.instrumentation.litellm import LiteLLMInstrumentor
 from openlit.instrumentation.crewai import CrewAIInstrumentor
+from openlit.instrumentation.ag2 import AG2Instrumentor
 from openlit.instrumentation.gpu import GPUInstrumentor
 import openlit.guard
 import openlit.evals
@@ -232,6 +233,9 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "transformers": "transformers",
         "litellm": "litellm",
         "crewai": "crewai",
+        "ag2": "ag2",
+        "autogen": "autogen",
+        "pyautogen": "pyautogen",
     }

     invalid_instrumentors = [
@@ -311,6 +315,9 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "transformers": TransformersInstrumentor(),
         "litellm": LiteLLMInstrumentor(),
         "crewai": CrewAIInstrumentor(),
+        "ag2": AG2Instrumentor(),
+        "autogen": AG2Instrumentor(),
+        "pyautogen": AG2Instrumentor(),
     }

     # Initialize and instrument only the enabled instrumentors
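With these mappings in place, the same AG2Instrumentor serves whichever alias of the framework is installed ("ag2", "autogen", or "pyautogen"). A minimal sketch of turning it on through openlit.init, assuming a local OTLP collector; the endpoint URL is illustrative, not taken from this diff:

import openlit

# Any of the three aliases resolves to AG2Instrumentor, so a plain init()
# call instruments AG2 automatically when the package is importable.
openlit.init(
    application_name="agent-demo",
    environment="dev",
    otlp_endpoint="http://127.0.0.1:4318",  # illustrative collector address
)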
openlit/instrumentation/ag2/__init__.py
ADDED
@@ -0,0 +1,50 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of AG2 Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.ag2.ag2 import (
+    wrap_ag2
+)
+
+_instruments = ("ag2 >= 0.3.2",)
+
+class AG2Instrumentor(BaseInstrumentor):
+    """
+    An instrumentor for AG2's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("ag2")
+
+        wrap_function_wrapper(
+            "autogen.agentchat.conversable_agent",
+            "ConversableAgent.initiate_chat",
+            wrap_ag2("ag2.initiate_chat", version, environment, application_name,
+                     tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "autogen.agentchat.conversable_agent",
+            "ConversableAgent.generate_reply",
+            wrap_ag2("ag2.generate_reply", version, environment, application_name,
+                     tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
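Because AG2Instrumentor subclasses BaseInstrumentor, it can also be driven by hand, which makes the kwargs consumed by _instrument above explicit. A hedged sketch using only the keyword names read in that method; the tracer wiring is assumed from standard OpenTelemetry usage:

from opentelemetry import trace
from openlit.instrumentation.ag2 import AG2Instrumentor

# BaseInstrumentor.instrument() forwards these kwargs to _instrument().
AG2Instrumentor().instrument(
    application_name="agent-demo",
    environment="dev",
    tracer=trace.get_tracer(__name__),
    pricing_info={},
    trace_content=True,
    disable_metrics=True,  # the ag2 wrapper records spans only, so metrics can stay off
)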
openlit/instrumentation/ag2/ag2.py
ADDED
@@ -0,0 +1,98 @@
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument
+"""
+Module for monitoring AG2.
+"""
+
+import logging
+from opentelemetry.trace import SpanKind, Status, StatusCode
+from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from openlit.__helpers import handle_exception
+from openlit.semcov import SemanticConvetion
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def wrap_ag2(gen_ai_endpoint, version, environment, application_name,
+             tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Creates a wrapper around a function call to trace and log its execution metrics.
+
+    This function wraps any given function to measure its execution time,
+    log its operation, and trace its execution using OpenTelemetry.
+
+    Parameters:
+    - gen_ai_endpoint (str): A descriptor or name for the endpoint being traced.
+    - version (str): The version of the AG2 application.
+    - environment (str): The deployment environment (e.g., 'production', 'development').
+    - application_name (str): Name of the AG2 application.
+    - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
+    - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
+    - trace_content (bool): Flag indicating whether to trace the content of the response.
+
+    Returns:
+    - function: A higher-order function that takes a function 'wrapped' and returns
+      a new function that wraps 'wrapped' with additional tracing and logging.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        An inner wrapper function that executes the wrapped function, measures execution
+        time, and records trace data using OpenTelemetry.
+
+        Parameters:
+        - wrapped (Callable): The original function that this wrapper will execute.
+        - instance (object): The instance to which the wrapped function belongs. This
+                             is used for instance methods. For static and class methods,
+                             this may be None.
+        - args (tuple): Positional arguments passed to the wrapped function.
+        - kwargs (dict): Keyword arguments passed to the wrapped function.
+
+        Returns:
+        - The result of the wrapped function call.
+
+        The wrapper initiates a span with the provided tracer, sets various attributes
+        on the span based on the function's execution and response, and ensures
+        errors are handled and logged appropriately.
+        """
+
+        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
+            response = wrapped(*args, **kwargs)
+
+            if isinstance(instance.__dict__.get('llm_config'), dict):
+                llm_model = instance.__dict__['llm_config'].get('model', 'gpt-4')
+            else:
+                # Fallback to default if 'llm_config' is not a dictionary
+                llm_model = None
+
+            try:
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                   gen_ai_endpoint)
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                   SemanticConvetion.GEN_AI_SYSTEM_AG2)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                                   environment)
+                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                   application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                   SemanticConvetion.GEN_AI_TYPE_AGENT)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
+                                   instance.name)
+                if llm_model:
+                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                                       llm_model)
+
+
+                span.set_status(Status(StatusCode.OK))
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
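Given the patching above, both spans fire during an ordinary AG2 conversation. A hedged end-to-end sketch — the agent setup follows the autogen API as documented, and the llm_config values are placeholders:

from autogen import ConversableAgent

llm_config = {"model": "gpt-4", "api_key": "sk-placeholder"}  # placeholder credentials

assistant = ConversableAgent("assistant", llm_config=llm_config)
user = ConversableAgent("user", llm_config=False, human_input_mode="NEVER")

# initiate_chat is wrapped, so this call runs inside an "ag2.initiate_chat"
# CLIENT span; each assistant turn also produces an "ag2.generate_reply" span,
# with gen_ai.agent.role set to the agent's name and the model read from llm_config.
user.initiate_chat(assistant, message="Say hello in one word.", max_turns=1)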
openlit/instrumentation/langchain/langchain.py
CHANGED
@@ -12,6 +12,26 @@ from openlit.semcov import SemanticConvetion
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)

+def get_attribute_from_instance_or_kwargs(instance, attribute_name, default=-1):
+    """Return attribute from instance or kwargs"""
+    # Attempt to retrieve model_kwargs from the instance
+    model_kwargs = getattr(instance, 'model_kwargs', None)
+
+    # Check for attribute in model_kwargs if it exists
+    if model_kwargs and attribute_name in model_kwargs:
+        return model_kwargs[attribute_name]
+
+    # Attempt to get the attribute directly from the instance
+    try:
+        return getattr(instance, attribute_name)
+    except AttributeError:
+        # Special handling for 'model' attribute to consider 'model_id'
+        if attribute_name == 'model':
+            return getattr(instance, 'model_id', 'default_model_id')
+
+        # Default if the attribute isn't found in model_kwargs or the instance
+        return default
+
 def general_wrap(gen_ai_endpoint, version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics):
     """
@@ -207,15 +227,18 @@ def allm(gen_ai_endpoint, version, environment, application_name,
             response = await wrapped(*args, **kwargs)

             try:
-
-
-
+                if args:
+                    prompt = str(args[0]) if args[0] is not None else ""
+                else:
+                    prompt = ""
+                input_tokens = general_tokens(prompt)
+                output_tokens = general_tokens(response)

-                #
-
-
-
-
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(
+                    str(get_attribute_from_instance_or_kwargs(instance, 'model')),
+                    pricing_info, input_tokens, output_tokens
+                )

                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
@@ -229,23 +252,23 @@ def allm(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                    application_name)
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'model')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'temperature')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'top_k')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'top_p')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                    False)
-
-
-
-
-
-
-
-
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                                   input_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                                   output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                   input_tokens + output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                   cost)
                 if trace_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
@@ -262,29 +285,29 @@ def allm(gen_ai_endpoint, version, environment, application_name,

                 span.set_status(Status(StatusCode.OK))

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if disable_metrics is False:
+                    attributes = {
+                        TELEMETRY_SDK_NAME:
+                            "openlit",
+                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
+                            application_name,
+                        SemanticConvetion.GEN_AI_SYSTEM:
+                            SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN,
+                        SemanticConvetion.GEN_AI_ENVIRONMENT:
+                            environment,
+                        SemanticConvetion.GEN_AI_TYPE:
+                            SemanticConvetion.GEN_AI_TYPE_CHAT,
+                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
+                            str(get_attribute_from_instance_or_kwargs(instance, 'model'))
+                    }
+
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_total_tokens"].add(
+                        input_tokens + output_tokens, attributes
+                    )
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)

                 # Return original response
                 return response
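The cost figure comes from get_chat_model_cost in openlit.__helpers, which is not shown in this diff. A sketch of the per-1k-token arithmetic it is assumed to perform, with a pricing_info layout inferred from the call site rather than confirmed:

def get_chat_model_cost_sketch(model, pricing_info, prompt_tokens, completion_tokens):
    # Assumed shape: {"chat": {"<model>": {"promptPrice": ..., "completionPrice": ...}}}
    try:
        prices = pricing_info["chat"][model]
        return ((prompt_tokens / 1000) * prices["promptPrice"]
                + (completion_tokens / 1000) * prices["completionPrice"])
    except (KeyError, TypeError):
        return 0  # unknown model or empty pricing_info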
@@ -344,15 +367,18 @@ def llm(gen_ai_endpoint, version, environment, application_name,
             response = wrapped(*args, **kwargs)

             try:
-
-
-
+                if args:
+                    prompt = str(args[0]) if args[0] is not None else ""
+                else:
+                    prompt = ""
+                input_tokens = general_tokens(prompt)
+                output_tokens = general_tokens(response)

-                #
-
-
-
-
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(
+                    str(get_attribute_from_instance_or_kwargs(instance, 'model')),
+                    pricing_info, input_tokens, output_tokens
+                )

                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
@@ -366,23 +392,23 @@ def llm(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                    application_name)
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'model')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'temperature')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'top_k')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-
+                                   str(get_attribute_from_instance_or_kwargs(instance, 'top_p')))
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                    False)
-
-
-
-
-
-
-
-
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                                   input_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                                   output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                   input_tokens + output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                   cost)
                 if trace_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
@@ -399,29 +425,29 @@ def llm(gen_ai_endpoint, version, environment, application_name,

                 span.set_status(Status(StatusCode.OK))

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if disable_metrics is False:
+                    attributes = {
+                        TELEMETRY_SDK_NAME:
+                            "openlit",
+                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
+                            application_name,
+                        SemanticConvetion.GEN_AI_SYSTEM:
+                            SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN,
+                        SemanticConvetion.GEN_AI_ENVIRONMENT:
+                            environment,
+                        SemanticConvetion.GEN_AI_TYPE:
+                            SemanticConvetion.GEN_AI_TYPE_CHAT,
+                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
+                            str(get_attribute_from_instance_or_kwargs(instance, 'model'))
+                    }
+
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_total_tokens"].add(
+                        input_tokens + output_tokens, attributes
+                    )
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)

                 # Return original response
                 return response
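Both the async and sync paths rely on general_tokens (from openlit.__helpers, not shown here) because LangChain's plain-text entry points return raw strings without usage counts. A sketch of a tiktoken-based approximation; the cl100k_base encoding choice is an assumption, not confirmed by this diff:

import tiktoken

def general_tokens_sketch(text):
    # Approximate token usage with a general-purpose BPE encoding.
    encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(str(text)))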
openlit/instrumentation/transformers/transformers.py
CHANGED
@@ -67,7 +67,7 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
             else:
                 prompt = kwargs.get("args", "")

-            prompt_tokens = general_tokens(prompt)
+            prompt_tokens = general_tokens(prompt[0])

             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
@@ -106,14 +106,20 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
                 else:
                     attribute_name = SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT
                 if trace_content:
+                    # pylint: disable=bare-except
+                    try:
+                        llm_response = completion.get('generated_text', '')
+                    except:
+                        llm_response = completion[i].get('generated_text', '')
+
                     span.add_event(
                         name=attribute_name,
                         attributes={
                             # pylint: disable=line-too-long
-                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION:
+                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llm_response,
                         },
                     )
-                    completion_tokens += general_tokens(
+                    completion_tokens += general_tokens(llm_response)

                 i=i+1
                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
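The new try/except exists because a transformers text-generation pipeline returns differently shaped completions: a dict per generated sequence for a single prompt, but a nested list for a batch of prompts, which is what the bare-except completion[i] fallback digs into. An illustration of the two shapes (the model choice is arbitrary):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

# Single prompt: a list of dicts, so each completion supports .get()
print(generator("Hello,", max_new_tokens=5))
# e.g. [{'generated_text': 'Hello, ...'}]

# Batched prompts: a list of lists, so a completion is itself a list and
# the completion[i].get('generated_text', '') fallback applies.
print(generator(["Hello,", "Hi,"], max_new_tokens=5))
# e.g. [[{'generated_text': 'Hello, ...'}], [{'generated_text': 'Hi, ...'}]]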
openlit/semcov/__init__.py
CHANGED
(+1 line; hunk not rendered in this diff view)
{openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.30.4
+Version: 1.31.0
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -66,14 +66,14 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 | LLMs | Vector DBs | Frameworks | GPUs |
 |--------------------------------------------------------------------------|----------------------------------------------|-------------------------------------------------|---------------|
 | [✅ OpenAI](https://docs.openlit.io/latest/integrations/openai) | [✅ ChromaDB](https://docs.openlit.io/latest/integrations/chromadb) | [✅ Langchain](https://docs.openlit.io/latest/integrations/langchain) | [✅ NVIDIA](https://docs.openlit.io/latest/integrations/nvidia-gpu) |
-| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | [✅ AMD](
+| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | [✅ AMD](https://docs.openlit.io/latest/integrations/amd-gpu) |
 | [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
 | [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
 | [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
 | [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
 | [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | [✅ CrewAI](https://docs.openlit.io/latest/integrations/crewai) | |
-| [✅ Azure AI Inference](https://docs.openlit.io/latest/integrations/azure-ai-inference) | | [✅ DSPy](https://docs.openlit.io/latest/integrations/dspy)
-| [✅ GitHub AI Models](https://docs.openlit.io/latest/integrations/github-models) | |
+| [✅ Azure AI Inference](https://docs.openlit.io/latest/integrations/azure-ai-inference) | | [✅ DSPy](https://docs.openlit.io/latest/integrations/dspy) | |
+| [✅ GitHub AI Models](https://docs.openlit.io/latest/integrations/github-models) | | [✅ AG2](https://docs.openlit.io/latest/integrations/ag2) | |
 | [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
 | [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
 | [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
@@ -84,7 +84,6 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 | [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
 | [✅ NVIDIA NIM](https://docs.openlit.io/latest/integrations/nvidia-nim) | | | |

-
 ## Supported Destinations
 - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
 - [✅ Prometheus + Tempo](https://docs.openlit.io/latest/connections/prometheus-tempo)
@@ -92,6 +91,7 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 - [✅ Grafana Cloud](https://docs.openlit.io/latest/connections/grafanacloud)
 - [✅ New Relic](https://docs.openlit.io/latest/connections/new-relic)
 - [✅ Elastic](https://docs.openlit.io/latest/connections/elastic)
+- [✅ Middleware.io](https://docs.openlit.io/latest/connections/middleware)
 - [✅ HyperDX](https://docs.openlit.io/latest/connections/hyperdx)
 - [✅ DataDog](https://docs.openlit.io/latest/connections/datadog)
 - [✅ SigNoz](https://docs.openlit.io/latest/connections/signoz)
{openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 openlit/__helpers.py,sha256=2OkGKOdsd9Hc011WxR70OqDlO6c4mZcu6McGuW1uAdA,6316
-openlit/__init__.py,sha256=
+openlit/__init__.py,sha256=gLhGox66F9JdxwSBYyhCvXRmBluFJN-naNnP_5rG3jI,19940
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
 openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154
@@ -12,6 +12,8 @@ openlit/guard/prompt_injection.py,sha256=3e4DKxB7QDzM-xPCpwEuureiH_2s_OTJ9BSckkn
 openlit/guard/restrict_topic.py,sha256=KTuWa7XeMsV4oXxOrD1CYZV0wXWxTfA0H3p_6q_IOsk,6444
 openlit/guard/sensitive_topic.py,sha256=RgVw_laFERv0nNdzBsAd2_3yLomMOK-gVq-P7oj1bTk,5552
 openlit/guard/utils.py,sha256=x0-_hAtNa_ogYR2GfnwiBF1rlqaXtaJ-rJeGguTDe-Q,7663
+openlit/instrumentation/ag2/__init__.py,sha256=Nf9cDoXB16NYgZisvVQduFYJ5fpU90CNlMrIF4pSH-Y,1827
+openlit/instrumentation/ag2/ag2.py,sha256=_ncg8RqUH-wXMYfaOYx2bcQOrOrDMVVm0EZAEkWdBn0,4444
 openlit/instrumentation/anthropic/__init__.py,sha256=oaU53BOPyfUKbEzYvLr1DPymDluurSnwo4Hernf2XdU,1955
 openlit/instrumentation/anthropic/anthropic.py,sha256=y7CEGhKOGHWt8G_5Phr4qPJTfPGRJIAr9Yk6nM3CcvM,16775
 openlit/instrumentation/anthropic/async_anthropic.py,sha256=Zz1KRKIG9wGn0quOoLvjORC-49IvHQpJ6GBdB-4PfCQ,16816
@@ -43,7 +45,7 @@ openlit/instrumentation/groq/groq.py,sha256=m4gFPbYzjUUIgjXZ0Alu2Zy1HcO5takCFA2X
 openlit/instrumentation/haystack/__init__.py,sha256=QK6XxxZUHX8vMv2Crk7rNBOc64iOOBLhJGL_lPlAZ8s,1758
 openlit/instrumentation/haystack/haystack.py,sha256=oQIZiDhdp3gnJnhYQ1OouJMc9YT0pQ-_31cmNuopa68,3891
 openlit/instrumentation/langchain/__init__.py,sha256=0AI2Dnqw81IcJw3jM--gGkv_HRh2GtosOGJjvOpw7Zk,3431
-openlit/instrumentation/langchain/langchain.py,sha256=
+openlit/instrumentation/langchain/langchain.py,sha256=jZgWBBWYHYSNnkf5wKyNFF_z9M9YxaZKGI_uyfvtMBU,36909
 openlit/instrumentation/litellm/__init__.py,sha256=Z-LsVHKJdPganHfJA_rWg7xAfQYkvLfpLdF-eckU4qY,2401
 openlit/instrumentation/litellm/async_litellm.py,sha256=1MKNZbvKaf1lFWbXi1MQy3qFNNeXawav34SDlOQ_H3w,27544
 openlit/instrumentation/litellm/litellm.py,sha256=4YqCQ4CEQ4sfDu7pTlnflL_AfUqYEQdJDTO7nHJ6noY,27450
@@ -68,7 +70,7 @@ openlit/instrumentation/qdrant/__init__.py,sha256=GMlZgRBKoQMgrL4cFbAKwytfdTHLzJ
 openlit/instrumentation/qdrant/async_qdrant.py,sha256=Xuyw2N75mRIjltrmY8wJes5DHal0Ku3A8VcUqfbsOl0,15071
 openlit/instrumentation/qdrant/qdrant.py,sha256=K0cvEUbNx0hnk8AbEheYPSHcCgjFC482IZyHF9-P_b8,15488
 openlit/instrumentation/transformers/__init__.py,sha256=4GBtjzcJU4XiPexIUYEqF3pNZMeQw4Gm5B-cyumaFjs,1468
-openlit/instrumentation/transformers/transformers.py,sha256=
+openlit/instrumentation/transformers/transformers.py,sha256=MWEVkxHRWTHrpD85I1leksDIVtBiTtR5fQCO3Z62qb4,7875
 openlit/instrumentation/vertexai/__init__.py,sha256=N3E9HtzefD-zC0fvmfGYiDmSqssoavp_i59wfuYLyMw,6079
 openlit/instrumentation/vertexai/async_vertexai.py,sha256=8JwSwLPPA4lAatf4w_5kJ5_YZDLwl5yG8N59cTD-EZM,55198
 openlit/instrumentation/vertexai/vertexai.py,sha256=R6dDQfC3YFoZDygxU2fkflcMsqIv8AVoU3XOwWSvpwA,54951
@@ -76,8 +78,8 @@ openlit/instrumentation/vllm/__init__.py,sha256=OVWalQ1dXvip1DUsjUGaHX4J-2FrSp-T
 openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOsOGN7Wd8,6527
 openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
 openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
-openlit/semcov/__init__.py,sha256=
-openlit-1.
-openlit-1.
-openlit-1.
-openlit-1.
+openlit/semcov/__init__.py,sha256=mXDJNyz6dFAaNPtu90iWYBclP8tz0Ia22QVjHq1Mxz8,9167
+openlit-1.31.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.31.0.dist-info/METADATA,sha256=Jn8RmER5JZRfC0PJ1Kpvm-pozhh6pHDPhWM4N9ro5ns,21046
+openlit-1.31.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+openlit-1.31.0.dist-info/RECORD,,
{openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/LICENSE
File without changes
{openlit-1.30.4.dist-info → openlit-1.31.0.dist-info}/WHEEL
File without changes