openlit 1.33.9__py3-none-any.whl → 1.33.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. openlit/__helpers.py +5 -0
  2. openlit/__init__.py +3 -2
  3. openlit/instrumentation/ag2/ag2.py +3 -3
  4. openlit/instrumentation/ai21/ai21.py +1 -1
  5. openlit/instrumentation/ai21/async_ai21.py +1 -1
  6. openlit/instrumentation/anthropic/anthropic.py +1 -1
  7. openlit/instrumentation/anthropic/async_anthropic.py +1 -1
  8. openlit/instrumentation/astra/astra.py +5 -5
  9. openlit/instrumentation/astra/async_astra.py +5 -5
  10. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +3 -3
  11. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +3 -3
  12. openlit/instrumentation/chroma/chroma.py +5 -5
  13. openlit/instrumentation/cohere/async_cohere.py +1 -1
  14. openlit/instrumentation/cohere/cohere.py +2 -2
  15. openlit/instrumentation/controlflow/controlflow.py +3 -3
  16. openlit/instrumentation/crawl4ai/async_crawl4ai.py +3 -3
  17. openlit/instrumentation/crawl4ai/crawl4ai.py +3 -3
  18. openlit/instrumentation/crewai/crewai.py +4 -2
  19. openlit/instrumentation/dynamiq/dynamiq.py +3 -3
  20. openlit/instrumentation/elevenlabs/async_elevenlabs.py +1 -2
  21. openlit/instrumentation/elevenlabs/elevenlabs.py +1 -2
  22. openlit/instrumentation/embedchain/embedchain.py +5 -5
  23. openlit/instrumentation/firecrawl/firecrawl.py +3 -3
  24. openlit/instrumentation/gpt4all/__init__.py +2 -2
  25. openlit/instrumentation/gpt4all/gpt4all.py +345 -220
  26. openlit/instrumentation/gpu/__init__.py +5 -5
  27. openlit/instrumentation/groq/__init__.py +2 -2
  28. openlit/instrumentation/groq/async_groq.py +356 -240
  29. openlit/instrumentation/groq/groq.py +356 -240
  30. openlit/instrumentation/haystack/haystack.py +3 -3
  31. openlit/instrumentation/julep/async_julep.py +3 -3
  32. openlit/instrumentation/julep/julep.py +3 -3
  33. openlit/instrumentation/langchain/__init__.py +13 -7
  34. openlit/instrumentation/langchain/async_langchain.py +384 -0
  35. openlit/instrumentation/langchain/langchain.py +98 -490
  36. openlit/instrumentation/letta/letta.py +5 -3
  37. openlit/instrumentation/litellm/__init__.py +4 -5
  38. openlit/instrumentation/litellm/async_litellm.py +316 -245
  39. openlit/instrumentation/litellm/litellm.py +312 -241
  40. openlit/instrumentation/llamaindex/llamaindex.py +3 -3
  41. openlit/instrumentation/mem0/mem0.py +3 -3
  42. openlit/instrumentation/milvus/milvus.py +5 -5
  43. openlit/instrumentation/mistral/__init__.py +6 -6
  44. openlit/instrumentation/mistral/async_mistral.py +421 -248
  45. openlit/instrumentation/mistral/mistral.py +418 -244
  46. openlit/instrumentation/multion/async_multion.py +4 -2
  47. openlit/instrumentation/multion/multion.py +4 -2
  48. openlit/instrumentation/ollama/__init__.py +8 -30
  49. openlit/instrumentation/ollama/async_ollama.py +385 -417
  50. openlit/instrumentation/ollama/ollama.py +384 -417
  51. openlit/instrumentation/openai/async_openai.py +7 -9
  52. openlit/instrumentation/openai/openai.py +7 -9
  53. openlit/instrumentation/phidata/phidata.py +4 -2
  54. openlit/instrumentation/pinecone/pinecone.py +5 -5
  55. openlit/instrumentation/premai/__init__.py +2 -2
  56. openlit/instrumentation/premai/premai.py +262 -213
  57. openlit/instrumentation/qdrant/async_qdrant.py +5 -5
  58. openlit/instrumentation/qdrant/qdrant.py +5 -5
  59. openlit/instrumentation/reka/__init__.py +2 -2
  60. openlit/instrumentation/reka/async_reka.py +90 -52
  61. openlit/instrumentation/reka/reka.py +90 -52
  62. openlit/instrumentation/together/__init__.py +4 -4
  63. openlit/instrumentation/together/async_together.py +278 -236
  64. openlit/instrumentation/together/together.py +278 -236
  65. openlit/instrumentation/transformers/__init__.py +1 -1
  66. openlit/instrumentation/transformers/transformers.py +75 -44
  67. openlit/instrumentation/vertexai/__init__.py +14 -64
  68. openlit/instrumentation/vertexai/async_vertexai.py +329 -986
  69. openlit/instrumentation/vertexai/vertexai.py +329 -986
  70. openlit/instrumentation/vllm/__init__.py +1 -1
  71. openlit/instrumentation/vllm/vllm.py +62 -32
  72. openlit/semcov/__init__.py +3 -3
  73. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/METADATA +1 -1
  74. openlit-1.33.10.dist-info/RECORD +122 -0
  75. openlit-1.33.9.dist-info/RECORD +0 -121
  76. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/LICENSE +0 -0
  77. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/WHEEL +0 -0
@@ -5,7 +5,7 @@ Module for monitoring Haystack applications.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import handle_exception
 from openlit.semcov import SemanticConvetion
 
@@ -63,11 +63,11 @@ def join_data(gen_ai_endpoint, version, environment, application_name,
                                     SemanticConvetion.GEN_AI_SYSTEM_HAYSTACK)
                 span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                     gen_ai_endpoint)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                                     environment)
                 span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                                     SemanticConvetion.GEN_AI_OPERATION_TYPE_FRAMEWORK)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                     application_name)
                 span.set_status(Status(StatusCode.OK))
 
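The recurring change in these hunks swaps OpenLIT's custom SemanticConvetion.GEN_AI_ENVIRONMENT and GEN_AI_APPLICATION_NAME keys for the standard OpenTelemetry resource attribute constants, so spans carry the conventional key names. A minimal sketch of what the imported names resolve to (a hypothetical snippet, not part of the diff, assuming opentelemetry-sdk is installed):

    # The imported names are plain string constants defined by the
    # OpenTelemetry resource semantic conventions.
    from opentelemetry.sdk.resources import (
        SERVICE_NAME,
        TELEMETRY_SDK_NAME,
        DEPLOYMENT_ENVIRONMENT,
    )

    print(SERVICE_NAME)            # "service.name"
    print(TELEMETRY_SDK_NAME)      # "telemetry.sdk.name"
    print(DEPLOYMENT_ENVIRONMENT)  # "deployment.environment"
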
@@ -5,7 +5,7 @@ Module for monitoring Julep.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import handle_exception
 from openlit.semcov import SemanticConvetion
 
@@ -64,9 +64,9 @@ def async_wrap_julep(gen_ai_endpoint, version, environment, application_name,
                                     gen_ai_endpoint)
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                     SemanticConvetion.GEN_AI_SYSTEM_JULEP)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                                     environment)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                     application_name)
                 span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                                     SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
@@ -5,7 +5,7 @@ Module for monitoring Julep.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import handle_exception
 from openlit.semcov import SemanticConvetion
 
@@ -64,9 +64,9 @@ def wrap_julep(gen_ai_endpoint, version, environment, application_name,
                                     gen_ai_endpoint)
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                     SemanticConvetion.GEN_AI_SYSTEM_JULEP)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                                     environment)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                     application_name)
                 span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                                     SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
@@ -8,10 +8,10 @@ from wrapt import wrap_function_wrapper
 from openlit.instrumentation.langchain.langchain import (
     general_wrap,
     hub,
-    llm,
-    allm,
-    chat,
-    achat
+    chat
+)
+from openlit.instrumentation.langchain.async_langchain import (
+    async_chat
 )
 
 _instruments = ("langchain >= 0.1.20",)
@@ -51,13 +51,13 @@ WRAPPED_METHODS = [
         "package": "langchain_core.language_models.llms",
         "object": "BaseLLM.invoke",
         "endpoint": "langchain.llm",
-        "wrapper": llm,
+        "wrapper": chat,
     },
     {
         "package": "langchain_core.language_models.llms",
         "object": "BaseLLM.ainvoke",
         "endpoint": "langchain.llm",
-        "wrapper": allm,
+        "wrapper": async_chat,
     },
     {
         "package": "langchain_core.language_models.chat_models",
@@ -69,13 +69,19 @@ WRAPPED_METHODS = [
         "package": "langchain_core.language_models.chat_models",
         "object": "BaseChatModel.ainvoke",
         "endpoint": "langchain.chat_models",
-        "wrapper": achat,
+        "wrapper": async_chat,
     },
     {
         "package": "langchain.chains.base",
         "object": "Chain.invoke",
         "endpoint": "langchain.chain.invoke",
         "wrapper": chat,
+    },
+    {
+        "package": "langchain.chains.base",
+        "object": "Chain.invoke",
+        "endpoint": "langchain.chain.invoke",
+        "wrapper": async_chat,
     }
 ]
 
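The WRAPPED_METHODS entries above are declarative: each maps a target package and object to a wrapper factory. A minimal sketch of how such a table is typically fed to wrapt during instrumentation (patch_all is a hypothetical illustration, not code from this package):

    from wrapt import wrap_function_wrapper

    def patch_all(wrapped_methods, version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics):
        for method in wrapped_methods:
            # "wrapper" holds a factory such as chat or async_chat; calling it
            # returns the wrapt-style wrapper(wrapped, instance, args, kwargs).
            wrap_function_wrapper(
                method["package"],   # e.g. "langchain_core.language_models.llms"
                method["object"],    # e.g. "BaseLLM.ainvoke"
                method["wrapper"](method["endpoint"], version, environment,
                                  application_name, tracer, pricing_info,
                                  trace_content, metrics, disable_metrics),
            )
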
@@ -0,0 +1,384 @@
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, unused-import, too-many-function-args
+"""
+Module for monitoring Langchain applications.
+"""
+
+import logging
+import time
+from opentelemetry.trace import SpanKind, Status, StatusCode
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from openlit.__helpers import (
+    get_chat_model_cost,
+    handle_exception,
+    general_tokens,
+    calculate_ttft,
+    calculate_tbt,
+    create_metrics_attributes,
+)
+from openlit.semcov import SemanticConvetion
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def get_attribute_from_instance_or_kwargs(instance, attribute_name, default=-1):
+    """Return attribute from instance or kwargs"""
+    # Attempt to retrieve model_kwargs from the instance
+    model_kwargs = getattr(instance, 'model_kwargs', None)
+
+    # Check for attribute in model_kwargs if it exists
+    if model_kwargs and attribute_name in model_kwargs:
+        return model_kwargs[attribute_name]
+
+    # Attempt to get the attribute directly from the instance
+    try:
+        return getattr(instance, attribute_name)
+    except AttributeError:
+        # Special handling for 'model' attribute to consider 'model_id'
+        if attribute_name == 'model':
+            return getattr(instance, 'model_id', 'default_model_id')
+
+        # Default if the attribute isn't found in model_kwargs or the instance
+        return default
+
+def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
+                       tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Creates a wrapper around a function call to trace and log its execution metrics.
+
+    This function wraps any given function to measure its execution time,
+    log its operation, and trace its execution using OpenTelemetry.
+
+    Parameters:
+    - gen_ai_endpoint (str): A descriptor or name for the endpoint being traced.
+    - version (str): The version of the Langchain application.
+    - environment (str): The deployment environment (e.g., 'production', 'development').
+    - application_name (str): Name of the Langchain application.
+    - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
+    - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
+    - trace_content (bool): Flag indicating whether to trace the content of the response.
+
+    Returns:
+    - function: A higher-order function that takes a function 'wrapped' and returns
+      a new function that wraps 'wrapped' with additional tracing and logging.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        An inner wrapper function that executes the wrapped function, measures execution
+        time, and records trace data using OpenTelemetry.
+
+        Parameters:
+        - wrapped (Callable): The original function that this wrapper will execute.
+        - instance (object): The instance to which the wrapped function belongs. This
+          is used for instance methods. For static and classmethods,
+          this may be None.
+        - args (tuple): Positional arguments passed to the wrapped function.
+        - kwargs (dict): Keyword arguments passed to the wrapped function.
+
+        Returns:
+        - The result of the wrapped function call.
+
+        The wrapper initiates a span with the provided tracer, sets various attributes
+        on the span based on the function's execution and response, and ensures
+        errors are handled and logged appropriately.
+        """
+        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+            response = await wrapped(*args, **kwargs)
+
+            try:
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                    SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                    gen_ai_endpoint)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                    environment)
+                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_FRAMEWORK)
+                span.set_attribute(SERVICE_NAME,
+                                    application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_RETRIEVAL_SOURCE,
+                                    response[0].metadata["source"])
+                span.set_status(Status(StatusCode.OK))
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
+
+def async_hub(gen_ai_endpoint, version, environment, application_name, tracer,
+              pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Creates a wrapper around Langchain hub operations for tracing and logging.
+
+    Similar to `general_wrap`, this function focuses on wrapping functions involved
+    in interacting with the Langchain hub, adding specific metadata relevant to
+    hub operations to the span attributes.
+
+    Parameters:
+    - gen_ai_endpoint (str): A descriptor or name for the Langchain hub endpoint.
+    - version (str): The version of the Langchain application.
+    - environment (str): The deployment environment, such as 'production' or 'development'.
+    - application_name (str): Name of the Langchain application.
+    - tracer (opentelemetry.trace.Tracer): The tracer for OpenTelemetry tracing.
+    - pricing_info (dict): Pricing information for the operation (not currently used).
+    - trace_content (bool): Indicates if the content of the response should be traced.
+
+    Returns:
+    - function: A new function that wraps the original hub operation call with added
+      logging, tracing, and metric calculation functionalities.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        An inner wrapper specifically designed for Langchain hub operations,
+        providing tracing, logging, and execution metrics.
+
+        Parameters:
+        - wrapped (Callable): The original hub operation function to be executed.
+        - instance (object): The instance of the class where the hub operation
+          method is defined. May be None for static or class methods.
+        - args (tuple): Positional arguments to pass to the hub operation function.
+        - kwargs (dict): Keyword arguments to pass to the hub operation function.
+
+        Returns:
+        - The result of executing the hub operation function.
+
+        This wrapper captures additional metadata relevant to Langchain hub operations,
+        creating spans with specific attributes and metrics that reflect the nature of
+        each hub call.
+        """
+
+        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+            response = await wrapped(*args, **kwargs)
+
+            try:
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                    SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                    gen_ai_endpoint)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                    environment)
+                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_FRAMEWORK)
+                span.set_attribute(SERVICE_NAME,
+                                    application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_HUB_OWNER,
+                                    response.metadata["lc_hub_owner"])
+                span.set_attribute(SemanticConvetion.GEN_AI_HUB_REPO,
+                                    response.metadata["lc_hub_repo"])
+                span.set_status(Status(StatusCode.OK))
+
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
+
+def async_chat(gen_ai_endpoint, version, environment, application_name,
+               tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Creates a wrapper around a function call to trace and log its execution metrics.
+
+    This function wraps any given function to measure its execution time,
+    log its operation, and trace its execution using OpenTelemetry.
+
+    Parameters:
+    - version (str): The version of the Langchain application.
+    - environment (str): The deployment environment (e.g., 'production', 'development').
+    - application_name (str): Name of the Langchain application.
+    - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
+    - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
+    - trace_content (bool): Flag indicating whether to trace the content of the response.
+
+    Returns:
+    - function: A higher-order function that takes a function 'wrapped' and returns
+      a new function that wraps 'wrapped' with additional tracing and logging.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        An inner wrapper function that executes the wrapped function, measures execution
+        time, and records trace data using OpenTelemetry.
+
+        Parameters:
+        - wrapped (Callable): The original function that this wrapper will execute.
+        - instance (object): The instance to which the wrapped function belongs. This
+          is used for instance methods. For static and classmethods,
+          this may be None.
+        - args (tuple): Positional arguments passed to the wrapped function.
+        - kwargs (dict): Keyword arguments passed to the wrapped function.
+
+        Returns:
+        - The result of the wrapped function call.
+
+        The wrapper initiates a span with the provided tracer, sets various attributes
+        on the span based on the function's execution and response, and ensures
+        errors are handled and logged appropriately.
+        """
+
+        server_address, server_port = "NOT_FOUND", "NOT_FOUND"
+
+        if hasattr(instance, "model_id"):
+            request_model = instance.model_id
+        elif hasattr(instance, "model"):
+            request_model = instance.model
+        elif hasattr(instance, "model_name"):
+            request_model = instance.model_name
+        else:
+            request_model = "NOT_FOUND"
+
+        span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            response = await wrapped(*args, **kwargs)
+            end_time = time.time()
+
+            try:
+                # Format 'messages' into a single string
+                message_prompt = kwargs.get("messages", "") or args[0]
+                formatted_messages = []
+
+                for message in message_prompt:
+                    # Handle the case where message is a tuple
+                    if isinstance(message, tuple) and len(message) == 2:
+                        role, content = message
+                    # Handle the case where message is a dictionary
+                    elif isinstance(message, dict):
+                        role = message["role"]
+                        content = message["content"]
+                    else:
+                        continue
+
+                    # Check if the content is a list
+                    if isinstance(content, list):
+                        content_str = ", ".join(
+                            f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
+                            if "type" in item else f'text: {item["text"]}'
+                            for item in content
+                        )
+                        formatted_messages.append(f"{role}: {content_str}")
+                    else:
+                        formatted_messages.append(f"{role}: {content}")
+
+                # Join all formatted messages with newline
+                prompt = "\n".join(formatted_messages)
+
+                input_tokens = general_tokens(str(prompt))
+                output_tokens = general_tokens(str(response))
+
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(
+                    request_model,
+                    pricing_info, input_tokens, output_tokens
+                )
+
+                # Set base span attribues (OTel Semconv)
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                    SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                                    request_model)
+                span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
+                                    request_model)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
+                                    str(getattr(instance, 'temperature', 1)))
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
+                                    str(getattr(instance, 'top_k', 1)))
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
+                                    str(getattr(instance, 'top_p', 1)))
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
+                                    input_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
+                                    output_tokens)
+                span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
+                                    server_address)
+                span.set_attribute(SemanticConvetion.SERVER_PORT,
+                                    server_port)
+
+                # Set base span attribues (Extras)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                    environment)
+                span.set_attribute(SERVICE_NAME,
+                                    application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
+                                    False)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                    input_tokens + output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                    cost)
+                span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
+                                    end_time - start_time)
+                span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
+                                    version)
+
+                if trace_content:
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+                        attributes={
+                            SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+                        },
+                    )
+                    completion_content = getattr(response, 'content', "")
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+                        attributes={
+                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION: completion_content,
+                        },
+                    )
+
+                span.set_status(Status(StatusCode.OK))
+
+                if disable_metrics is False:
+                    attributes = create_metrics_attributes(
+                        service_name=application_name,
+                        deployment_environment=environment,
+                        operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+                        system=SemanticConvetion.GEN_AI_SYSTEM_LANGCHAIN,
+                        request_model=request_model,
+                        server_address=server_address,
+                        server_port=server_port,
+                        response_model=request_model,
+                    )
+
+                    metrics["genai_client_usage_tokens"].record(
+                        input_tokens + output_tokens, attributes
+                    )
+                    metrics["genai_client_operation_duration"].record(
+                        end_time - start_time, attributes
+                    )
+                    metrics["genai_server_ttft"].record(
+                        end_time - start_time, attributes
+                    )
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
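
The new module's inner wrappers all follow wrapt's (wrapped, instance, args, kwargs) convention, and because each wrapper is itself an async function, a patched ainvoke still hands back an awaitable. A self-contained sketch of that mechanism (FakeChatModel and traced_ainvoke_factory are stand-ins for illustration, not openlit code):

    import asyncio
    from wrapt import wrap_function_wrapper
    from opentelemetry import trace
    from opentelemetry.trace import SpanKind

    class FakeChatModel:
        # Stand-in for a Langchain chat model exposing an async entry point.
        model_name = "fake-model"

        async def ainvoke(self, prompt):
            return f"echo: {prompt}"

    def traced_ainvoke_factory(tracer):
        # Same shape as async_chat's inner wrapper above.
        async def wrapper(wrapped, instance, args, kwargs):
            span_name = f"chat {getattr(instance, 'model_name', 'NOT_FOUND')}"
            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT):
                return await wrapped(*args, **kwargs)
        return wrapper

    wrap_function_wrapper(__name__, "FakeChatModel.ainvoke",
                          traced_ainvoke_factory(trace.get_tracer(__name__)))

    print(asyncio.run(FakeChatModel().ainvoke("hello")))  # echo: hello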