openlit 1.34.2__py3-none-any.whl → 1.34.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/openai/__init__.py +17 -2
- openlit/instrumentation/openai/async_openai.py +161 -0
- openlit/instrumentation/openai/openai.py +161 -0
- {openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/METADATA +1 -1
- {openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/RECORD +7 -7
- {openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/LICENSE +0 -0
- {openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/WHEEL +0 -0
openlit/instrumentation/openai/__init__.py
@@ -5,9 +5,10 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.openai.openai import chat_completions, embedding, responses
+from openlit.instrumentation.openai.openai import chat_completions, embedding, responses, chat_completions_parse
 from openlit.instrumentation.openai.openai import image_generate, image_variatons, audio_create
-from openlit.instrumentation.openai.async_openai import async_chat_completions, async_embedding
+from openlit.instrumentation.openai.async_openai import (async_chat_completions, async_embedding,
+                                                         async_chat_completions_parse)
 from openlit.instrumentation.openai.async_openai import async_image_generate, async_image_variatons
 from openlit.instrumentation.openai.async_openai import async_audio_create, async_responses
 
@@ -127,6 +128,20 @@ class OpenAIInstrumentor(BaseInstrumentor):
                               metrics, disable_metrics),
         )
 
+        wrap_function_wrapper(
+            "openai.resources.beta.chat.completions",
+            "Completions.parse",
+            chat_completions_parse(version, environment, application_name, tracer, pricing_info,
+                                   capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "openai.resources.beta.chat.completions",
+            "AsyncCompletions.parse",
+            async_chat_completions_parse(version, environment, application_name, tracer, pricing_info,
+                                         capture_message_content, metrics, disable_metrics),
+        )
+
     @staticmethod
     def _uninstrument(self, **kwargs):
         pass
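With these two hooks registered, the OpenAI SDK's beta structured-outputs entry points get the same tracing as the existing completion wrappers. A minimal sketch of a call the new wrapping covers, assuming openlit's usual `openlit.init()` options, an `OPENAI_API_KEY` in the environment, and a hypothetical `Weather` schema:

from pydantic import BaseModel
from openai import OpenAI
import openlit

# Assumes default exporter settings; application_name/environment feed the
# span attributes set by the wrappers below.
openlit.init(application_name="parse-demo", environment="dev")

class Weather(BaseModel):  # hypothetical response schema, not part of openlit
    city: str
    temperature_c: float

client = OpenAI()
# Completions.parse is now wrapped: this call should emit a "chat gpt-4o"
# client span plus token, cost, and duration metrics.
completion = client.beta.chat.completions.parse(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    response_format=Weather,
)
print(completion.choices[0].message.parsed)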
openlit/instrumentation/openai/async_openai.py
@@ -882,6 +882,167 @@ def async_chat_completions(version, environment, application_name,
 
     return wrapper
 
+def async_chat_completions_parse(version, environment, application_name, tracer, pricing_info, capture_message_content,
+                                 metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for chat completions parse to collect metrics.
+
+    Args:
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the OpenAI API.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of OpenAI usage.
+        capture_message_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the chat completions parse method to add telemetry.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the 'chat.completions.parse' API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original 'chat.completions' method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the 'chat.completions' method.
+            kwargs: Keyword arguments for the 'chat.completions' method.
+
+        Returns:
+            The response from the original 'chat.completions.parse' method.
+        """
+        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        request_model = kwargs.get("model", "gpt-4o")
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            try:
+                # Execute the original 'parse' method
+                response = await wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = response_as_dict(response)
+
+                # Format 'messages' from kwargs to calculate input tokens
+                message_prompt = kwargs.get("messages", "")
+                formatted_messages = []
+                for message in message_prompt:
+                    role = message.get("role")
+                    content = message.get("content")
+                    if content:
+                        formatted_messages.append(f"{role}: {content}")
+                prompt = "\n".join(formatted_messages)
+
+                input_tokens = response_dict.get('usage').get('prompt_tokens')
+                output_tokens = response_dict.get('usage').get('completion_tokens')
+
+                # Calculate cost
+                cost = get_chat_model_cost(request_model,
+                                           pricing_info, input_tokens,
+                                           output_tokens)
+
+                # Set base span attribues (OTel Semconv)
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
+                span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_OPENAI)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, str(kwargs.get("seed", "")))
+                span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+                                   str(kwargs.get("frequency_penalty", 0.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, str(kwargs.get("max_tokens", -1)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
+                                   str(kwargs.get("presence_penalty", 0.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, str(kwargs.get("stop", [])))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, str(kwargs.get("temperature", 1.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, str(kwargs.get("top_p", 1.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, response_dict.get("id"))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, response_dict.get('model'))
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+                span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
+                                   str(kwargs.get("service_tier", "auto")))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
+                                   response_dict.get('service_tier', 'auto'))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
+                                   str(response_dict.get('system_fingerprint', '')))
+
+                # Set base span attribues (Extras)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+                span.set_attribute(SERVICE_NAME, application_name)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, kwargs.get("user", ""))
+                span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, False)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, input_tokens + output_tokens)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+                span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, end_time - start_time)
+
+                if capture_message_content:
+                    span.add_event(
+                        name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+                        attributes={SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt},
+                    )
+
+                for i in range(kwargs.get('n', 1)):
+                    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
+                                       [response_dict.get('choices')[i].get('finish_reason')])
+                    if capture_message_content:
+                        span.add_event(
+                            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+                            attributes={
+                                # pylint: disable=line-too-long
+                                SemanticConvention.GEN_AI_CONTENT_COMPLETION: str(
+                                    response_dict.get('choices')[i].get('message').get('content')),
+                            },
+                        )
+                    if kwargs.get('tools'):
+                        span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALLS,
+                                           str(response_dict.get('choices')[i].get('message').get('tool_calls')))
+
+                    if isinstance(response_dict.get('choices')[i].get('message').get('content'), str):
+                        span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
+                                           "text")
+                    elif response_dict.get('choices')[i].get('message').get('content') is not None:
+                        span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
+                                           "json")
+
+                span.set_status(Status(StatusCode.OK))
+
+                if not disable_metrics:
+                    attributes = create_metrics_attributes(
+                        service_name=application_name,
+                        deployment_environment=environment,
+                        operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+                        system=SemanticConvention.GEN_AI_SYSTEM_OPENAI,
+                        request_model=request_model,
+                        server_address=server_address,
+                        server_port=server_port,
+                        response_model=response_dict.get('model'),
+                    )
+                    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+                    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+                    metrics["genai_server_ttft"].record(end_time - start_time, attributes)
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)
+
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in 'parse' trace creation: %s", e)
+                # Re-raise the exception to not interfere with the application flow
+                raise
+
+    return wrapper
+
 def async_embedding(version, environment, application_name,
                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
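Like the module's existing wrappers, async_chat_completions_parse is a factory: configuration is captured in a closure and the returned coroutine follows wrapt's (wrapped, instance, args, kwargs) calling convention. A stripped-down sketch of that pattern; the timed/label names are illustrative, not part of openlit:

import time
from wrapt import wrap_function_wrapper

def timed(label):
    # Factory: capture config once, return a wrapt-compatible async wrapper.
    async def wrapper(wrapped, instance, args, kwargs):
        start = time.time()
        try:
            return await wrapped(*args, **kwargs)
        finally:
            print(f"{label} took {time.time() - start:.3f}s")
    return wrapper

# Applied the same way the instrumentor does above (requires openai installed):
# wrap_function_wrapper("openai.resources.beta.chat.completions",
#                       "AsyncCompletions.parse", timed("parse"))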
openlit/instrumentation/openai/openai.py
@@ -882,6 +882,167 @@ def chat_completions(version, environment, application_name,
 
     return wrapper
 
+def chat_completions_parse(version, environment, application_name, tracer, pricing_info, capture_message_content,
+                           metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for chat completions parse to collect metrics.
+
+    Args:
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the OpenAI API.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of OpenAI usage.
+        capture_message_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the chat completions parse method to add telemetry.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the 'chat.completions.parse' API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original 'chat.completions' method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the 'chat.completions' method.
+            kwargs: Keyword arguments for the 'chat.completions' method.
+
+        Returns:
+            The response from the original 'chat.completions.parse' method.
+        """
+        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        request_model = kwargs.get("model", "gpt-4o")
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            try:
+                # Execute the original 'parse' method
+                response = wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = response_as_dict(response)
+
+                # Format 'messages' from kwargs to calculate input tokens
+                message_prompt = kwargs.get("messages", "")
+                formatted_messages = []
+                for message in message_prompt:
+                    role = message.get("role")
+                    content = message.get("content")
+                    if content:
+                        formatted_messages.append(f"{role}: {content}")
+                prompt = "\n".join(formatted_messages)
+
+                input_tokens = response_dict.get('usage').get('prompt_tokens')
+                output_tokens = response_dict.get('usage').get('completion_tokens')
+
+                # Calculate cost
+                cost = get_chat_model_cost(request_model,
+                                           pricing_info, input_tokens,
+                                           output_tokens)
+
+                # Set base span attribues (OTel Semconv)
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
+                span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_OPENAI)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, str(kwargs.get("seed", "")))
+                span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+                                   str(kwargs.get("frequency_penalty", 0.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, str(kwargs.get("max_tokens", -1)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
+                                   str(kwargs.get("presence_penalty", 0.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, str(kwargs.get("stop", [])))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, str(kwargs.get("temperature", 1.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, str(kwargs.get("top_p", 1.0)))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, response_dict.get("id"))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, response_dict.get('model'))
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+                span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
+                                   str(kwargs.get("service_tier", "auto")))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
+                                   response_dict.get('service_tier', 'auto'))
+                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
+                                   str(response_dict.get('system_fingerprint', '')))
+
+                # Set base span attribues (Extras)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+                span.set_attribute(SERVICE_NAME, application_name)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, kwargs.get("user", ""))
+                span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, False)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, input_tokens + output_tokens)
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+                span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, end_time - start_time)
+
+                if capture_message_content:
+                    span.add_event(
+                        name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+                        attributes={SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt},
+                    )
+
+                for i in range(kwargs.get('n', 1)):
+                    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
+                                       [response_dict.get('choices')[i].get('finish_reason')])
+                    if capture_message_content:
+                        span.add_event(
+                            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+                            attributes={
+                                # pylint: disable=line-too-long
+                                SemanticConvention.GEN_AI_CONTENT_COMPLETION: str(
+                                    response_dict.get('choices')[i].get('message').get('content')),
+                            },
+                        )
+                    if kwargs.get('tools'):
+                        span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALLS,
+                                           str(response_dict.get('choices')[i].get('message').get('tool_calls')))
+
+                    if isinstance(response_dict.get('choices')[i].get('message').get('content'), str):
+                        span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
+                                           "text")
+                    elif response_dict.get('choices')[i].get('message').get('content') is not None:
+                        span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
+                                           "json")
+
+                span.set_status(Status(StatusCode.OK))
+
+                if not disable_metrics:
+                    attributes = create_metrics_attributes(
+                        service_name=application_name,
+                        deployment_environment=environment,
+                        operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+                        system=SemanticConvention.GEN_AI_SYSTEM_OPENAI,
+                        request_model=request_model,
+                        server_address=server_address,
+                        server_port=server_port,
+                        response_model=response_dict.get('model'),
+                    )
+                    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+                    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+                    metrics["genai_server_ttft"].record(end_time - start_time, attributes)
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)
+
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in 'parse' trace creation: %s", e)
+                # Re-raise the exception to not interfere with the application flow
+                raise
+
+    return wrapper
+
 def embedding(version, environment, application_name,
               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
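The sync variant mirrors the async one down to the usage accounting: token counts come from the response's usage block and are fed to get_chat_model_cost together with pricing_info. The shape of pricing_info is not shown in this diff; below is a hedged sketch of the per-1k-token arithmetic such a helper typically performs, with assumed key names (estimate_chat_cost, promptPrice, completionPrice are illustrative):

def estimate_chat_cost(pricing_info, model, input_tokens, output_tokens):
    # Assumed layout: pricing_info["chat"][model] holds per-1k-token prices.
    rates = pricing_info["chat"][model]
    return (input_tokens / 1000) * rates["promptPrice"] \
        + (output_tokens / 1000) * rates["completionPrice"]

pricing = {"chat": {"gpt-4o": {"promptPrice": 0.005, "completionPrice": 0.015}}}
print(estimate_chat_cost(pricing, "gpt-4o", 120, 80))  # ~0.0018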
{openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.2
+Version: 1.34.3
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/RECORD
@@ -95,9 +95,9 @@ openlit/instrumentation/ollama/__init__.py,sha256=v7VhVxHw_c6QtMznxe6a7z6QrYHZsH
 openlit/instrumentation/ollama/async_ollama.py,sha256=zJPDr2ROh1nvFGoxgdTbe04Zr1KhmgJUYFPeuRLQGLk,6667
 openlit/instrumentation/ollama/ollama.py,sha256=MNUltiP9XVT4azmO_-E2vjhFaoHQyJ0Z6c-HnB0_jCE,6563
 openlit/instrumentation/ollama/utils.py,sha256=41uvYaYkGwWfRyHYqhOOwrFy6cMzBlG1urJYUat9Q24,14819
-openlit/instrumentation/openai/__init__.py,sha256=
-openlit/instrumentation/openai/async_openai.py,sha256=
-openlit/instrumentation/openai/openai.py,sha256=
+openlit/instrumentation/openai/__init__.py,sha256=y9Ox5aYWTb2nAa_d0ic3Mkv4wEKmUGqslW9nHKg6NnY,6320
+openlit/instrumentation/openai/async_openai.py,sha256=JkpVcyOhGvPzhqxzeP01MwwfaYhddNsSUQqgfF8hU8I,81390
+openlit/instrumentation/openai/openai.py,sha256=5fgRyK5dUN2zUdrN0vBSZFnSEAXf2dKS0qnq_85-mQE,81175
 openlit/instrumentation/openai_agents/__init__.py,sha256=tRTSIrUtkXc_lfQnVanXmQLd2Sy9RqBNTHF5FhhZx7o,1530
 openlit/instrumentation/openai_agents/openai_agents.py,sha256=kRWPgjofcOviMi3w7CsRvJO3SCjqPmuq-PM800vIM7g,2678
 openlit/instrumentation/phidata/__init__.py,sha256=tqls5-UI6FzbjxYgq_qqAfALhWJm8dHn2NtgqiQA4f8,1557
@@ -131,7 +131,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=ptyo37PY-FHDx_PShEvbdns71cD4YvvXw15bCRXKCKM,13461
-openlit-1.34.
-openlit-1.34.
-openlit-1.34.
-openlit-1.34.
+openlit-1.34.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.3.dist-info/METADATA,sha256=8_jDnUBC1cxAr2DNwkg5IXbNQX2qru-_nC7OpwC6Jh8,23469
+openlit-1.34.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.3.dist-info/RECORD,,

{openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/LICENSE: file without changes
{openlit-1.34.2.dist-info → openlit-1.34.3.dist-info}/WHEEL: file without changes