openlit 1.16.2__py3-none-any.whl → 1.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. openlit/__init__.py +3 -0
  2. openlit/instrumentation/anthropic/anthropic.py +28 -10
  3. openlit/instrumentation/anthropic/async_anthropic.py +27 -10
  4. openlit/instrumentation/bedrock/bedrock.py +13 -5
  5. openlit/instrumentation/cohere/cohere.py +33 -12
  6. openlit/instrumentation/elevenlabs/async_elevenlabs.py +6 -2
  7. openlit/instrumentation/elevenlabs/elevenlabs.py +6 -2
  8. openlit/instrumentation/gpt4all/gpt4all.py +30 -10
  9. openlit/instrumentation/groq/async_groq.py +31 -11
  10. openlit/instrumentation/groq/groq.py +31 -11
  11. openlit/instrumentation/mistral/async_mistral.py +33 -12
  12. openlit/instrumentation/mistral/mistral.py +33 -12
  13. openlit/instrumentation/ollama/async_ollama.py +57 -20
  14. openlit/instrumentation/ollama/ollama.py +57 -20
  15. openlit/instrumentation/openai/async_azure_openai.py +94 -35
  16. openlit/instrumentation/openai/async_openai.py +68 -27
  17. openlit/instrumentation/openai/azure_openai.py +89 -31
  18. openlit/instrumentation/openai/openai.py +68 -29
  19. openlit/instrumentation/transformers/transformers.py +20 -16
  20. openlit/instrumentation/vertexai/async_vertexai.py +104 -35
  21. openlit/instrumentation/vertexai/vertexai.py +104 -35
  22. openlit/instrumentation/vllm/__init__.py +43 -0
  23. openlit/instrumentation/vllm/vllm.py +143 -0
  24. openlit/semcov/__init__.py +4 -1
  25. {openlit-1.16.2.dist-info → openlit-1.18.0.dist-info}/METADATA +3 -1
  26. {openlit-1.16.2.dist-info → openlit-1.18.0.dist-info}/RECORD +28 -26
  27. {openlit-1.16.2.dist-info → openlit-1.18.0.dist-info}/LICENSE +0 -0
  28. {openlit-1.16.2.dist-info → openlit-1.18.0.dist-info}/WHEEL +0 -0
@@ -144,10 +144,18 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
144
144
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
145
145
  cost)
146
146
  if trace_content:
147
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
148
- prompt)
149
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
150
- llmresponse)
147
+ span.add_event(
148
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
149
+ attributes={
150
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
151
+ },
152
+ )
153
+ span.add_event(
154
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
155
+ attributes={
156
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llmresponse,
157
+ },
158
+ )
151
159
 
152
160
  span.set_status(Status(StatusCode.OK))
153
161
 
@@ -240,8 +248,12 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
240
248
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
241
249
  False)
242
250
  if trace_content:
243
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
244
- prompt)
251
+ span.add_event(
252
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
253
+ attributes={
254
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
255
+ },
256
+ )
245
257
 
246
258
  span.set_status(Status(StatusCode.OK))
247
259
 
@@ -259,23 +271,31 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
259
271
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
260
272
  response.usage.total_tokens)
261
273
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
262
- response.choices[0].finish_reason)
274
+ [response.choices[0].finish_reason])
263
275
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
264
276
  cost)
265
277
 
266
278
  # Set span attributes for when n = 1 (default)
267
279
  if "n" not in kwargs or kwargs["n"] == 1:
268
280
  if trace_content:
269
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
270
- response.choices[0].message.content)
281
+ span.add_event(
282
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
283
+ attributes={
284
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[0].message.content,
285
+ },
286
+ )
271
287
 
272
288
  # Set span attributes for when n > 0
273
289
  else:
274
290
  i = 0
275
291
  while i < kwargs["n"] and trace_content is True:
276
- attribute_name = f"gen_ai.completion.{i}"
277
- span.set_attribute(attribute_name,
278
- response.choices[i].message.content)
292
+ attribute_name = f"gen_ai.content.completion.{i}"
293
+ span.add_event(
294
+ name=attribute_name,
295
+ attributes={
296
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[i].message.content,
297
+ },
298
+ )
279
299
  i += 1
280
300
 
281
301
  # Return original response
@@ -288,8 +308,12 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
288
308
  pricing_info, response.usage.prompt_tokens,
289
309
  response.usage.completion_tokens)
290
310
 
291
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
292
- "Function called with tools")
311
+ span.add_event(
312
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
313
+ attributes={
314
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
315
+ },
316
+ )
293
317
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
294
318
  response.usage.prompt_tokens)
295
319
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
@@ -405,8 +429,12 @@ def async_embedding(gen_ai_endpoint, version, environment, application_name,
405
429
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
406
430
  cost)
407
431
  if trace_content:
408
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
409
- kwargs.get("input", ""))
432
+ span.add_event(
433
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
434
+ attributes={
435
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("input", ""),
436
+ },
437
+ )
410
438
 
411
439
  span.set_status(Status(StatusCode.OK))
412
440
 
@@ -611,12 +639,19 @@ def async_image_generate(gen_ai_endpoint, version, environment, application_name
611
639
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
612
640
  kwargs.get("user", ""))
613
641
  if trace_content:
614
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
615
- kwargs.get("prompt", ""))
616
-
642
+ span.add_event(
643
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
644
+ attributes={
645
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
646
+ },
647
+ )
617
648
  attribute_name = f"gen_ai.response.image.{images_count}"
618
- span.set_attribute(attribute_name,
619
- getattr(items, image))
649
+ span.add_event(
650
+ name=attribute_name,
651
+ attributes={
652
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
653
+ },
654
+ )
620
655
 
621
656
  images_count+=1
622
657
 
@@ -729,11 +764,13 @@ def async_image_variatons(gen_ai_endpoint, version, environment, application_nam
729
764
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_QUALITY,
730
765
  "standard")
731
766
  if trace_content:
732
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
733
- kwargs.get(SemanticConvetion.GEN_AI_TYPE_IMAGE, ""))
734
-
735
767
  attribute_name = f"gen_ai.response.image.{images_count}"
736
- span.set_attribute(attribute_name, getattr(items, image))
768
+ span.add_event(
769
+ name=attribute_name,
770
+ attributes={
771
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
772
+ },
773
+ )
737
774
 
738
775
  images_count+=1
739
776
 
@@ -838,8 +875,12 @@ def async_audio_create(gen_ai_endpoint, version, environment, application_name,
838
875
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
839
876
  cost)
840
877
  if trace_content:
841
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
842
- kwargs.get("input", ""))
878
+ span.add_event(
879
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
880
+ attributes={
881
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("input", ""),
882
+ },
883
+ )
843
884
 
844
885
  span.set_status(Status(StatusCode.OK))
845
886
 
@@ -143,10 +143,18 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
143
143
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
144
144
  cost)
145
145
  if trace_content:
146
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
147
- prompt)
148
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
149
- llmresponse)
146
+ span.add_event(
147
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
148
+ attributes={
149
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
150
+ },
151
+ )
152
+ span.add_event(
153
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
154
+ attributes={
155
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llmresponse,
156
+ },
157
+ )
150
158
 
151
159
  span.set_status(Status(StatusCode.OK))
152
160
 
@@ -237,6 +245,13 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
237
245
  kwargs.get("seed", ""))
238
246
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
239
247
  False)
248
+ if trace_content:
249
+ span.add_event(
250
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
251
+ attributes={
252
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
253
+ },
254
+ )
240
255
  if trace_content:
241
256
  span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
242
257
  prompt)
@@ -255,23 +270,31 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
255
270
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
256
271
  response.usage.total_tokens)
257
272
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
258
- response.choices[0].finish_reason)
273
+ [response.choices[0].finish_reason])
259
274
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
260
275
  cost)
261
276
 
262
277
  # Set span attributes for when n = 1 (default)
263
278
  if "n" not in kwargs or kwargs["n"] == 1:
264
279
  if trace_content:
265
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
266
- response.choices[0].message.content)
280
+ span.add_event(
281
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
282
+ attributes={
283
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[0].message.content,
284
+ },
285
+ )
267
286
 
268
287
  # Set span attributes for when n > 0
269
288
  else:
270
289
  i = 0
271
290
  while i < kwargs["n"] and trace_content is True:
272
- attribute_name = f"gen_ai.completion.{i}"
273
- span.set_attribute(attribute_name,
274
- response.choices[i].message.content)
291
+ attribute_name = f"gen_ai.content.completion.{i}"
292
+ span.add_event(
293
+ name=attribute_name,
294
+ attributes={
295
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[i].message.content,
296
+ },
297
+ )
275
298
  i += 1
276
299
 
277
300
  # Return original response
@@ -284,8 +307,12 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
284
307
  response.usage.prompt_tokens,
285
308
  response.usage.completion_tokens)
286
309
 
287
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
288
- "Function called with tools")
310
+ span.add_event(
311
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
312
+ attributes={
313
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
314
+ },
315
+ )
289
316
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
290
317
  response.usage.prompt_tokens)
291
318
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
@@ -441,10 +468,18 @@ def azure_completions(gen_ai_endpoint, version, environment, application_name,
441
468
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
442
469
  cost)
443
470
  if trace_content:
444
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
445
- prompt)
446
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
447
- llmresponse)
471
+ span.add_event(
472
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
473
+ attributes={
474
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
475
+ },
476
+ )
477
+ span.add_event(
478
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
479
+ attributes={
480
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llmresponse,
481
+ },
482
+ )
448
483
 
449
484
  span.set_status(Status(StatusCode.OK))
450
485
 
@@ -517,8 +552,12 @@ def azure_completions(gen_ai_endpoint, version, environment, application_name,
517
552
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
518
553
  False)
519
554
  if trace_content:
520
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
521
- kwargs.get("prompt", ""))
555
+ span.add_event(
556
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
557
+ attributes={
558
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
559
+ },
560
+ )
522
561
 
523
562
  # Set span attributes when tools is not passed to the function call
524
563
  if "tools" not in kwargs:
@@ -534,23 +573,31 @@ def azure_completions(gen_ai_endpoint, version, environment, application_name,
534
573
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
535
574
  response.usage.total_tokens)
536
575
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
537
- response.choices[0].finish_reason)
576
+ [response.choices[0].finish_reason])
538
577
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
539
578
  cost)
540
579
 
541
580
  # Set span attributes for when n = 1 (default)
542
581
  if "n" not in kwargs or kwargs["n"] == 1:
543
582
  if trace_content:
544
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
545
- response.choices[0].text)
583
+ span.add_event(
584
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
585
+ attributes={
586
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[0].text,
587
+ },
588
+ )
546
589
 
547
590
  # Set span attributes for when n > 0
548
591
  else:
549
592
  i = 0
550
593
  while i < kwargs["n"] and trace_content is True:
551
- attribute_name = f"gen_ai.completion.{i}"
552
- span.set_attribute(attribute_name,
553
- response.choices[i].text)
594
+ attribute_name = f"gen_ai.content.completion.{i}"
595
+ span.add_event(
596
+ name=attribute_name,
597
+ attributes={
598
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[i].text,
599
+ },
600
+ )
554
601
  i += 1
555
602
  return response
556
603
 
@@ -678,8 +725,12 @@ def azure_embedding(gen_ai_endpoint, version, environment, application_name,
678
725
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
679
726
  cost)
680
727
  if trace_content:
681
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
682
- kwargs.get("input", ""))
728
+ span.add_event(
729
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
730
+ attributes={
731
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("input", ""),
732
+ },
733
+ )
683
734
 
684
735
  span.set_status(Status(StatusCode.OK))
685
736
 
@@ -795,12 +846,19 @@ def azure_image_generate(gen_ai_endpoint, version, environment, application_name
795
846
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
796
847
  kwargs.get("user", ""))
797
848
  if trace_content:
798
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
799
- kwargs.get("prompt", ""))
800
-
849
+ span.add_event(
850
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
851
+ attributes={
852
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
853
+ },
854
+ )
801
855
  attribute_name = f"gen_ai.response.image.{images_count}"
802
- span.set_attribute(attribute_name,
803
- getattr(items, image))
856
+ span.add_event(
857
+ name=attribute_name,
858
+ attributes={
859
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
860
+ },
861
+ )
804
862
 
805
863
  images_count+=1
806
864
 
@@ -145,10 +145,18 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
145
145
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
146
146
  cost)
147
147
  if trace_content:
148
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
149
- prompt)
150
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
151
- llmresponse)
148
+ span.add_event(
149
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
150
+ attributes={
151
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
152
+ },
153
+ )
154
+ span.add_event(
155
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
156
+ attributes={
157
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llmresponse,
158
+ },
159
+ )
152
160
 
153
161
  span.set_status(Status(StatusCode.OK))
154
162
 
@@ -241,8 +249,12 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
241
249
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
242
250
  False)
243
251
  if trace_content:
244
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
245
- prompt)
252
+ span.add_event(
253
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
254
+ attributes={
255
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
256
+ },
257
+ )
246
258
 
247
259
  # Set span attributes when tools is not passed to the function call
248
260
  if "tools" not in kwargs:
@@ -258,23 +270,31 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
258
270
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
259
271
  response.usage.total_tokens)
260
272
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
261
- response.choices[0].finish_reason)
273
+ [response.choices[0].finish_reason])
262
274
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
263
275
  cost)
264
276
 
265
277
  # Set span attributes for when n = 1 (default)
266
278
  if "n" not in kwargs or kwargs["n"] == 1:
267
279
  if trace_content:
268
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
269
- response.choices[0].message.content)
280
+ span.add_event(
281
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
282
+ attributes={
283
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[0].message.content,
284
+ },
285
+ )
270
286
 
271
287
  # Set span attributes for when n > 0
272
288
  else:
273
289
  i = 0
274
290
  while i < kwargs["n"] and trace_content is True:
275
- attribute_name = f"gen_ai.completion.{i}"
276
- span.set_attribute(attribute_name,
277
- response.choices[i].message.content)
291
+ attribute_name = f"gen_ai.content.completion.{i}"
292
+ span.add_event(
293
+ name=attribute_name,
294
+ attributes={
295
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.choices[i].message.content,
296
+ },
297
+ )
278
298
  i += 1
279
299
 
280
300
  # Return original response
@@ -286,9 +306,12 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
286
306
  cost = get_chat_model_cost(kwargs.get("model", "gpt-3.5-turbo"),
287
307
  pricing_info, response.usage.prompt_tokens,
288
308
  response.usage.completion_tokens)
289
-
290
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
291
- "Function called with tools")
309
+ span.add_event(
310
+ name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
311
+ attributes={
312
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
313
+ },
314
+ )
292
315
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
293
316
  response.usage.prompt_tokens)
294
317
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
@@ -404,8 +427,12 @@ def embedding(gen_ai_endpoint, version, environment, application_name,
404
427
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
405
428
  cost)
406
429
  if trace_content:
407
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
408
- kwargs.get("input", ""))
430
+ span.add_event(
431
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
432
+ attributes={
433
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("input", ""),
434
+ },
435
+ )
409
436
 
410
437
  span.set_status(Status(StatusCode.OK))
411
438
 
@@ -626,12 +653,19 @@ def image_generate(gen_ai_endpoint, version, environment, application_name,
626
653
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
627
654
  kwargs.get("user", ""))
628
655
  if trace_content:
629
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
630
- kwargs.get("prompt", ""))
631
-
656
+ span.add_event(
657
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
658
+ attributes={
659
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("prompt", ""),
660
+ },
661
+ )
632
662
  attribute_name = f"gen_ai.response.image.{images_count}"
633
- span.set_attribute(attribute_name,
634
- getattr(items, image))
663
+ span.add_event(
664
+ name=attribute_name,
665
+ attributes={
666
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
667
+ },
668
+ )
635
669
 
636
670
  images_count+=1
637
671
 
@@ -744,12 +778,13 @@ def image_variatons(gen_ai_endpoint, version, environment, application_name,
744
778
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IMAGE_QUALITY,
745
779
  "standard")
746
780
  if trace_content:
747
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
748
- kwargs.get(SemanticConvetion.GEN_AI_TYPE_IMAGE, ""))
749
-
750
781
  attribute_name = f"gen_ai.response.image.{images_count}"
751
- span.set_attribute(attribute_name,
752
- getattr(items, image))
782
+ span.add_event(
783
+ name=attribute_name,
784
+ attributes={
785
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: getattr(items, image),
786
+ },
787
+ )
753
788
 
754
789
  images_count+=1
755
790
 
@@ -854,8 +889,12 @@ def audio_create(gen_ai_endpoint, version, environment, application_name,
854
889
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
855
890
  cost)
856
891
  if trace_content:
857
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
858
- kwargs.get("input", ""))
892
+ span.add_event(
893
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
894
+ attributes={
895
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("input", ""),
896
+ },
897
+ )
859
898
 
860
899
  span.set_status(Status(StatusCode.OK))
861
900
 
@@ -88,29 +88,33 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
88
88
  forward_params.get("top_p", "null"))
89
89
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
90
90
  forward_params.get("max_length", -1))
91
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
92
- prompt)
93
91
  if trace_content:
94
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
95
- prompt_tokens)
92
+ span.add_event(
93
+ name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
94
+ attributes={
95
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
96
+ },
97
+ )
98
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
99
+ prompt_tokens)
96
100
 
97
101
  i = 0
98
102
  completion_tokens = 0
99
103
  for completion in response:
100
104
  if len(response) > 1:
101
- attribute_name = f"gen_ai.completion.{i}"
105
+ attribute_name = f"gen_ai.content.completion.{i}"
102
106
  else:
103
- attribute_name = SemanticConvetion.GEN_AI_CONTENT_COMPLETION
104
- if i == 0:
105
- if trace_content:
106
- span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION,
107
- completion["generated_text"])
108
- completion_tokens += general_tokens(completion["generated_text"])
109
- else:
110
- if trace_content:
111
- span.set_attribute(attribute_name,
112
- completion["generated_text"])
113
- completion_tokens += general_tokens(completion["generated_text"])
107
+ attribute_name = SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT
108
+ if trace_content:
109
+ span.add_event(
110
+ name=attribute_name,
111
+ attributes={
112
+ # pylint: disable=line-too-long
113
+ SemanticConvetion.GEN_AI_CONTENT_COMPLETION: completion["generated_text"],
114
+ },
115
+ )
116
+ completion_tokens += general_tokens(completion["generated_text"])
117
+
114
118
  i=i+1
115
119
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
116
120
  completion_tokens)