langtrace-python-sdk 2.3.20__py3-none-any.whl → 2.3.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,651 @@
1
+ import json
2
+ from typing import Any, Dict, List, Optional, Callable, Awaitable, Union
3
+ from langtrace.trace_attributes import (
4
+ LLMSpanAttributes,
5
+ SpanAttributes,
6
+ )
7
+ from langtrace_python_sdk.utils import set_span_attribute
8
+ from langtrace_python_sdk.utils.silently_fail import silently_fail
9
+ from opentelemetry import trace
10
+ from opentelemetry.trace import SpanKind, Tracer, Span
11
+ from opentelemetry.trace.status import Status, StatusCode
12
+ from opentelemetry.trace.propagation import set_span_in_context
13
+ from langtrace_python_sdk.constants.instrumentation.common import (
14
+ SERVICE_PROVIDERS,
15
+ )
16
+ from langtrace_python_sdk.constants.instrumentation.litellm import APIS
17
+ from langtrace_python_sdk.utils.llm import (
18
+ calculate_prompt_tokens,
19
+ get_base_url,
20
+ get_extra_attributes,
21
+ get_langtrace_attributes,
22
+ get_llm_request_attributes,
23
+ get_span_name,
24
+ get_tool_calls,
25
+ is_streaming,
26
+ set_event_completion,
27
+ StreamWrapper,
28
+ set_span_attributes,
29
+ )
30
+ from langtrace_python_sdk.types import NOT_GIVEN
31
+
32
+ from langtrace_python_sdk.instrumentation.openai.types import (
33
+ ImagesGenerateKwargs,
34
+ ChatCompletionsCreateKwargs,
35
+ EmbeddingsCreateKwargs,
36
+ ImagesEditKwargs,
37
+ ResultType,
38
+ ContentItem,
39
+ )
40
+
41
+
42
def filter_valid_attributes(attributes):
    """Return a copy of *attributes* without None values or empty strings."""
    filtered = {}
    for key, value in attributes.items():
        # Drop only None and "" — falsy values like 0 or False are kept.
        if value is None or value == "":
            continue
        filtered[key] = value
    return filtered
49
+
50
+
51
def images_generate(version: str, tracer: Tracer) -> Callable:
    """
    Wrap the `generate` method of the `Images` class to trace it.
    """

    def traced_method(
        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesGenerateKwargs
    ) -> Any:
        provider = SERVICE_PROVIDERS["LITELLM"]
        raw_attributes = {
            **get_langtrace_attributes(version, provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
            **get_extra_attributes(),  # type: ignore
        }
        attributes = LLMSpanAttributes(**filter_valid_attributes(raw_attributes))

        with tracer.start_as_current_span(
            name=get_span_name(APIS["IMAGES_GENERATION"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:
            set_span_attributes(span, attributes)
            try:
                result = wrapped(*args, **kwargs)
                if not is_streaming(kwargs):
                    # Only the first image object is recorded on the span;
                    # getattr keeps this safe when data is absent or empty.
                    item: Optional[ContentItem] = None
                    if hasattr(result, "data") and len(result.data) > 0:
                        item = result.data[0]
                    completion = [
                        {
                            "role": "assistant",
                            "content": {
                                "url": getattr(item, "url", ""),
                                "revised_prompt": getattr(item, "revised_prompt", ""),
                            },
                        }
                    ]
                    set_event_completion(span, completion)

                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Surface the failure on the span, then re-raise for the caller.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
109
+
110
+
111
def async_images_generate(version: str, tracer: Tracer) -> Callable:
    """
    Wrap the `generate` method of the `Images` class to trace it.
    """

    async def traced_method(
        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesGenerateKwargs
    ) -> Awaitable[Any]:
        provider = SERVICE_PROVIDERS["LITELLM"]

        raw_attributes = {
            **get_langtrace_attributes(version, provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
            **get_extra_attributes(),  # type: ignore
        }
        attributes = LLMSpanAttributes(**filter_valid_attributes(raw_attributes))

        with tracer.start_as_current_span(
            name=get_span_name(APIS["IMAGES_GENERATION"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:
            set_span_attributes(span, attributes)
            try:
                result = await wrapped(*args, **kwargs)
                if not is_streaming(kwargs):
                    # Only the first image object is recorded on the span;
                    # getattr keeps this safe when data is absent or empty.
                    item: Optional[ContentItem] = None
                    if hasattr(result, "data") and len(result.data) > 0:
                        item = result.data[0]
                    completion = [
                        {
                            "role": "assistant",
                            "content": {
                                "url": getattr(item, "url", ""),
                                "revised_prompt": getattr(item, "revised_prompt", ""),
                            },
                        }
                    ]
                    set_event_completion(span, completion)

                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Surface the failure on the span, then re-raise for the caller.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
170
+
171
+
172
def images_edit(version: str, tracer: Tracer) -> Callable:
    """
    Wrap the `edit` method of the `Images` class to trace it.
    """

    def traced_method(
        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesEditKwargs
    ) -> Any:
        service_provider = SERVICE_PROVIDERS["LITELLM"]

        span_attributes = {
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, operation_name="images_edit"),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["IMAGES_EDIT"]["ENDPOINT"],
            SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("response_format"),
            SpanAttributes.LLM_IMAGE_SIZE: kwargs.get("size"),
            **get_extra_attributes(),  # type: ignore
        }

        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))

        with tracer.start_as_current_span(
            # Wrapped in get_span_name for consistency with the other wrappers.
            name=get_span_name(APIS["IMAGES_EDIT"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:
            set_span_attributes(span, attributes)
            try:
                # Attempt to call the original method
                result = wrapped(*args, **kwargs)

                response = []
                # Parse each image object. Use getattr with defaults so
                # responses that carry only `url` or only `b64_json`
                # (the API returns one or the other depending on
                # response_format) don't raise AttributeError.
                for each_data in getattr(result, "data", []) or []:
                    response.append(
                        {
                            "role": "assistant",
                            "content": {
                                "url": getattr(each_data, "url", ""),
                                "revised_prompt": getattr(
                                    each_data, "revised_prompt", ""
                                ),
                                "base64": getattr(each_data, "b64_json", ""),
                            },
                        }
                    )

                set_event_completion(span, response)

                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Record the exception in the span
                span.record_exception(err)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(err)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
233
+
234
+
235
def chat_completions_create(version: str, tracer: Tracer) -> Callable:
    """Wrap the `create` method of the `ChatCompletion` class to trace it."""

    def traced_method(
        wrapped: Callable,
        instance: Any,
        args: List[Any],
        kwargs: ChatCompletionsCreateKwargs,
    ) -> Any:
        # Pick the provider from the client's base URL; default to LiteLLM.
        service_provider = SERVICE_PROVIDERS["LITELLM"]
        base_url = get_base_url(instance)  # hoisted: was called per branch
        if "perplexity" in base_url:
            service_provider = SERVICE_PROVIDERS["PPLX"]
        elif "azure" in base_url:
            service_provider = SERVICE_PROVIDERS["AZURE"]
        elif "groq" in base_url:
            service_provider = SERVICE_PROVIDERS["GROQ"]

        llm_prompts = []
        for item in kwargs.get("messages", []):
            tools = get_tool_calls(item)
            if tools is not None:
                tool_calls = []
                for tool_call in tools:
                    tool_call_dict = {
                        "id": getattr(tool_call, "id", ""),
                        "type": getattr(tool_call, "type", ""),
                    }
                    if hasattr(tool_call, "function"):
                        tool_call_dict["function"] = {
                            "name": getattr(tool_call.function, "name", ""),
                            "arguments": getattr(tool_call.function, "arguments", ""),
                        }
                    # Serialize to JSON for consistency with the async wrapper,
                    # which already stores tool calls as JSON strings.
                    tool_calls.append(json.dumps(tool_call_dict))
                llm_prompts.append(tool_calls)
            else:
                llm_prompts.append(item)

        span_attributes = {
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, prompts=llm_prompts),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["CHAT_COMPLETION"]["ENDPOINT"],
            **get_extra_attributes(),  # type: ignore
        }

        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))

        # The span is started (not context-managed) and ended manually because
        # a streaming response must keep it open until StreamWrapper finishes.
        span = tracer.start_span(
            name=get_span_name(APIS["CHAT_COMPLETION"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        )
        _set_input_attributes(span, kwargs, attributes)

        try:
            result = wrapped(*args, **kwargs)
            if is_streaming(kwargs):
                # Streamed responses carry no usage block, so estimate the
                # prompt token count up front.
                prompt_tokens = 0
                for message in kwargs.get("messages", {}):
                    prompt_tokens += calculate_prompt_tokens(
                        json.dumps(str(message)), kwargs.get("model")
                    )
                functions = kwargs.get("functions")
                if functions is not None and functions != NOT_GIVEN:
                    for function in functions:
                        prompt_tokens += calculate_prompt_tokens(
                            json.dumps(function), kwargs.get("model")
                        )

                return StreamWrapper(
                    result,
                    span,
                    prompt_tokens,
                    function_call=kwargs.get("functions") is not None,
                    tool_calls=kwargs.get("tools") is not None,
                )
            else:
                _set_response_attributes(span, result)
                span.set_status(StatusCode.OK)
                span.end()
                return result

        except Exception as error:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()
            raise

    return traced_method
323
+
324
+
325
def async_chat_completions_create(version: str, tracer: Tracer) -> Callable:
    """Wrap the `create` method of the `ChatCompletion` class to trace it."""

    async def traced_method(
        wrapped: Callable,
        instance: Any,
        args: List[Any],
        kwargs: ChatCompletionsCreateKwargs,
    ) -> Awaitable[Any]:
        # Pick the provider from the client's base URL; default to LiteLLM.
        service_provider = SERVICE_PROVIDERS["LITELLM"]
        base_url = get_base_url(instance)
        if "perplexity" in base_url:
            service_provider = SERVICE_PROVIDERS["PPLX"]
        elif "azure" in base_url:
            service_provider = SERVICE_PROVIDERS["AZURE"]
        elif "groq" in base_url:
            # Added for parity with the sync wrapper, which already
            # detects Groq endpoints.
            service_provider = SERVICE_PROVIDERS["GROQ"]

        llm_prompts = []
        for item in kwargs.get("messages", []):
            tools = get_tool_calls(item)
            if tools is not None:
                tool_calls = []
                for tool_call in tools:
                    tool_call_dict = {
                        "id": getattr(tool_call, "id", ""),
                        "type": getattr(tool_call, "type", ""),
                    }
                    if hasattr(tool_call, "function"):
                        tool_call_dict["function"] = {
                            "name": getattr(tool_call.function, "name", ""),
                            "arguments": getattr(tool_call.function, "arguments", ""),
                        }
                    tool_calls.append(json.dumps(tool_call_dict))
                llm_prompts.append(tool_calls)
            else:
                llm_prompts.append(item)

        span_attributes = {
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, prompts=llm_prompts),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["CHAT_COMPLETION"]["ENDPOINT"],
            **get_extra_attributes(),  # type: ignore
        }

        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))

        # The span is started (not context-managed) and ended manually because
        # a streaming response must keep it open until StreamWrapper finishes.
        span = tracer.start_span(
            name=get_span_name(APIS["CHAT_COMPLETION"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        )
        _set_input_attributes(span, kwargs, attributes)

        try:
            result = await wrapped(*args, **kwargs)
            if is_streaming(kwargs):
                # Streamed responses carry no usage block, so estimate the
                # prompt token count up front.
                prompt_tokens = 0
                for message in kwargs.get("messages", {}):
                    prompt_tokens += calculate_prompt_tokens(
                        json.dumps(str(message)), kwargs.get("model")
                    )

                functions = kwargs.get("functions")
                if functions is not None and functions != NOT_GIVEN:
                    for function in functions:
                        prompt_tokens += calculate_prompt_tokens(
                            json.dumps(function), kwargs.get("model")
                        )

                return StreamWrapper(
                    result,
                    span,
                    prompt_tokens,
                    function_call=kwargs.get("functions") is not None,
                    tool_calls=kwargs.get("tools") is not None,
                )  # type: ignore
            else:
                _set_response_attributes(span, result)
                span.set_status(StatusCode.OK)
                span.end()
                return result

        except Exception as error:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()
            raise

    return traced_method
412
+
413
+
414
def embeddings_create(version: str, tracer: Tracer) -> Callable:
    """
    Wrap the `create` method of the `Embeddings` class to trace it.
    """

    def traced_method(
        wrapped: Callable,
        instance: Any,
        args: List[Any],
        kwargs: EmbeddingsCreateKwargs,
    ) -> Any:
        provider = SERVICE_PROVIDERS["LITELLM"]

        raw_attributes = {
            **get_langtrace_attributes(version, provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, operation_name="embed"),
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
            SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
            **get_extra_attributes(),  # type: ignore
        }

        # Encoding format is always recorded as a list.
        fmt = kwargs.get("encoding_format")
        if fmt is not None:
            raw_attributes[SpanAttributes.LLM_REQUEST_ENCODING_FORMATS] = (
                fmt if isinstance(fmt, list) else [fmt]
            )

        if kwargs.get("input") is not None:
            raw_attributes[SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS] = json.dumps(
                [kwargs.get("input", "")]
            )

        attributes = LLMSpanAttributes(**filter_valid_attributes(raw_attributes))

        with tracer.start_as_current_span(
            name=get_span_name(APIS["EMBEDDINGS_CREATE"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:
            set_span_attributes(span, attributes)
            try:
                result = wrapped(*args, **kwargs)
                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Surface the failure on the span, then re-raise for the caller.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
474
+
475
+
476
def async_embeddings_create(version: str, tracer: Tracer) -> Callable:
    """
    Wrap the `create` method of the `Embeddings` class to trace it.
    """

    async def traced_method(
        wrapped: Callable,
        instance: Any,
        args: List[Any],
        kwargs: EmbeddingsCreateKwargs,
    ) -> Awaitable[Any]:

        service_provider = SERVICE_PROVIDERS["LITELLM"]

        span_attributes = {
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(kwargs, operation_name="embed"),
            # Added for parity with the sync wrapper.
            SpanAttributes.LLM_URL: "not available",
            SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
            SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
            **get_extra_attributes(),  # type: ignore
        }

        encoding_format = kwargs.get("encoding_format")
        if encoding_format is not None:
            if not isinstance(encoding_format, list):
                encoding_format = [encoding_format]
            span_attributes[SpanAttributes.LLM_REQUEST_ENCODING_FORMATS] = (
                encoding_format
            )

        if kwargs.get("input") is not None:
            span_attributes[SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS] = json.dumps(
                [kwargs.get("input", "")]
            )

        # BUGFIX: build the validated attributes AFTER the optional keys above
        # are added. Previously this ran first, so encoding_format and input
        # were silently dropped from the recorded span.
        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))

        with tracer.start_as_current_span(
            name=get_span_name(APIS["EMBEDDINGS_CREATE"]["METHOD"]),
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:

            set_span_attributes(span, attributes)
            try:
                # Attempt to call the original method
                result = await wrapped(*args, **kwargs)
                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Record the exception in the span
                span.record_exception(err)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(err)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
536
+
537
+
538
def extract_content(choice: Any) -> Union[str, List[Dict[str, Any]], Dict[str, Any]]:
    """Pull the assistant output out of a single completion choice.

    Returns the plain message content when present, otherwise the list of
    tool calls, otherwise the legacy function_call payload, otherwise "".
    """
    message = getattr(choice, "message", None)
    if message is None:
        return ""

    # Plain text content takes precedence.
    content = getattr(message, "content", None)
    if content is not None:
        return content

    # Tool calls: serialize each one to a plain dict.
    tool_calls = getattr(message, "tool_calls", None)
    if tool_calls is not None:
        return [
            {
                "id": call.id,
                "type": call.type,
                "function": {
                    "name": call.function.name,
                    "arguments": call.function.arguments,
                },
            }
            for call in tool_calls
        ]

    # Legacy single function_call shape.
    function_call = getattr(message, "function_call", None)
    if function_call is not None:
        return {
            "name": function_call.name,
            "arguments": function_call.arguments,
        }

    return ""
580
+
581
+
582
@silently_fail
def _set_input_attributes(
    span: Span, kwargs: ChatCompletionsCreateKwargs, attributes: LLMSpanAttributes
) -> None:
    """Copy the request attributes onto *span* and record any tools/functions."""
    for name, value in attributes.model_dump(by_alias=True).items():
        set_span_attribute(span, name, value)

    serialized_tools = []

    # Legacy `functions` are wrapped in the modern tool envelope.
    functions = kwargs.get("functions")
    if functions is not None and functions != NOT_GIVEN:
        serialized_tools.extend(
            json.dumps({"type": "function", "function": fn}) for fn in functions
        )

    tools = kwargs.get("tools")
    if tools is not None and tools != NOT_GIVEN:
        serialized_tools.append(json.dumps(tools))

    if serialized_tools:
        set_span_attribute(span, SpanAttributes.LLM_TOOLS, json.dumps(serialized_tools))
599
+
600
+
601
@silently_fail
def _set_response_attributes(span: Span, result: ResultType) -> None:
    """Record model, choices, system fingerprint and token usage on *span*."""
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, result.model)

    if hasattr(result, "choices") and result.choices is not None:
        responses = [
            {
                "role": (
                    choice.message.role
                    if choice.message and choice.message.role
                    else "assistant"
                ),
                "content": extract_content(choice),
                # Azure-only field; include it when present.
                **(
                    {"content_filter_results": choice.content_filter_results}
                    if hasattr(choice, "content_filter_results")
                    else {}
                ),
            }
            for choice in result.choices
        ]
        set_event_completion(span, responses)

    if (
        hasattr(result, "system_fingerprint")
        and result.system_fingerprint is not None
        and result.system_fingerprint != NOT_GIVEN
    ):
        set_span_attribute(
            span,
            SpanAttributes.LLM_SYSTEM_FINGERPRINT,
            result.system_fingerprint,
        )

    # Token usage. The inner `if usage is not None` re-check in the original
    # was redundant with the guard below and has been removed.
    if hasattr(result, "usage") and result.usage is not None:
        usage = result.usage
        set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
            usage.prompt_tokens,
        )
        set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
            usage.completion_tokens,
        )
        set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
            usage.total_tokens,
        )