openlit 1.33.8__py3-none-any.whl → 1.33.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. openlit/__helpers.py +83 -0
  2. openlit/__init__.py +1 -1
  3. openlit/instrumentation/ag2/ag2.py +2 -2
  4. openlit/instrumentation/ai21/__init__.py +4 -4
  5. openlit/instrumentation/ai21/ai21.py +370 -319
  6. openlit/instrumentation/ai21/async_ai21.py +371 -319
  7. openlit/instrumentation/anthropic/__init__.py +4 -4
  8. openlit/instrumentation/anthropic/anthropic.py +321 -189
  9. openlit/instrumentation/anthropic/async_anthropic.py +323 -190
  10. openlit/instrumentation/assemblyai/__init__.py +1 -1
  11. openlit/instrumentation/assemblyai/assemblyai.py +59 -43
  12. openlit/instrumentation/astra/astra.py +4 -4
  13. openlit/instrumentation/astra/async_astra.py +4 -4
  14. openlit/instrumentation/azure_ai_inference/__init__.py +4 -4
  15. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +406 -252
  16. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +406 -252
  17. openlit/instrumentation/bedrock/__init__.py +1 -1
  18. openlit/instrumentation/bedrock/bedrock.py +115 -58
  19. openlit/instrumentation/chroma/chroma.py +4 -4
  20. openlit/instrumentation/cohere/__init__.py +33 -10
  21. openlit/instrumentation/cohere/async_cohere.py +610 -0
  22. openlit/instrumentation/cohere/cohere.py +410 -219
  23. openlit/instrumentation/controlflow/controlflow.py +2 -2
  24. openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
  25. openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
  26. openlit/instrumentation/crewai/crewai.py +2 -2
  27. openlit/instrumentation/dynamiq/dynamiq.py +2 -2
  28. openlit/instrumentation/elevenlabs/async_elevenlabs.py +73 -47
  29. openlit/instrumentation/elevenlabs/elevenlabs.py +73 -52
  30. openlit/instrumentation/embedchain/embedchain.py +4 -4
  31. openlit/instrumentation/firecrawl/firecrawl.py +2 -2
  32. openlit/instrumentation/google_ai_studio/__init__.py +9 -9
  33. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +183 -219
  34. openlit/instrumentation/google_ai_studio/google_ai_studio.py +183 -220
  35. openlit/instrumentation/gpt4all/gpt4all.py +17 -17
  36. openlit/instrumentation/groq/async_groq.py +14 -14
  37. openlit/instrumentation/groq/groq.py +14 -14
  38. openlit/instrumentation/haystack/haystack.py +2 -2
  39. openlit/instrumentation/julep/async_julep.py +2 -2
  40. openlit/instrumentation/julep/julep.py +2 -2
  41. openlit/instrumentation/langchain/langchain.py +36 -31
  42. openlit/instrumentation/letta/letta.py +6 -6
  43. openlit/instrumentation/litellm/async_litellm.py +20 -20
  44. openlit/instrumentation/litellm/litellm.py +20 -20
  45. openlit/instrumentation/llamaindex/llamaindex.py +2 -2
  46. openlit/instrumentation/mem0/mem0.py +2 -2
  47. openlit/instrumentation/milvus/milvus.py +4 -4
  48. openlit/instrumentation/mistral/async_mistral.py +18 -18
  49. openlit/instrumentation/mistral/mistral.py +18 -18
  50. openlit/instrumentation/multion/async_multion.py +2 -2
  51. openlit/instrumentation/multion/multion.py +2 -2
  52. openlit/instrumentation/ollama/async_ollama.py +29 -29
  53. openlit/instrumentation/ollama/ollama.py +29 -29
  54. openlit/instrumentation/openai/__init__.py +11 -230
  55. openlit/instrumentation/openai/async_openai.py +434 -409
  56. openlit/instrumentation/openai/openai.py +415 -393
  57. openlit/instrumentation/phidata/phidata.py +2 -2
  58. openlit/instrumentation/pinecone/pinecone.py +4 -4
  59. openlit/instrumentation/premai/premai.py +20 -20
  60. openlit/instrumentation/qdrant/async_qdrant.py +4 -4
  61. openlit/instrumentation/qdrant/qdrant.py +4 -4
  62. openlit/instrumentation/reka/async_reka.py +6 -6
  63. openlit/instrumentation/reka/reka.py +6 -6
  64. openlit/instrumentation/together/async_together.py +18 -18
  65. openlit/instrumentation/together/together.py +18 -18
  66. openlit/instrumentation/transformers/transformers.py +6 -6
  67. openlit/instrumentation/vertexai/async_vertexai.py +53 -53
  68. openlit/instrumentation/vertexai/vertexai.py +53 -53
  69. openlit/instrumentation/vllm/vllm.py +6 -6
  70. openlit/otel/metrics.py +98 -7
  71. openlit/semcov/__init__.py +113 -80
  72. {openlit-1.33.8.dist-info → openlit-1.33.9.dist-info}/METADATA +1 -1
  73. openlit-1.33.9.dist-info/RECORD +121 -0
  74. {openlit-1.33.8.dist-info → openlit-1.33.9.dist-info}/WHEEL +1 -1
  75. openlit/instrumentation/openai/async_azure_openai.py +0 -900
  76. openlit/instrumentation/openai/azure_openai.py +0 -898
  77. openlit-1.33.8.dist-info/RECORD +0 -122
  78. {openlit-1.33.8.dist-info → openlit-1.33.9.dist-info}/LICENSE +0 -0
@@ -58,8 +58,8 @@ def phidata_wrap(gen_ai_endpoint, version, environment, application_name,
58
58
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
59
59
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
60
60
  SemanticConvetion.GEN_AI_SYSTEM_PHIDATA)
61
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
62
- SemanticConvetion.GEN_AI_TYPE_AGENT)
61
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
62
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
63
63
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
64
64
  gen_ai_endpoint)
65
65
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
@@ -76,8 +76,8 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
76
76
  environment)
77
77
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
78
78
  application_name)
79
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
80
- SemanticConvetion.GEN_AI_TYPE_VECTORDB)
79
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
80
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
81
81
  span.set_attribute(SemanticConvetion.DB_SYSTEM,
82
82
  SemanticConvetion.DB_SYSTEM_PINECONE)
83
83
 
@@ -152,8 +152,8 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
152
152
  SemanticConvetion.DB_SYSTEM_PINECONE,
153
153
  SemanticConvetion.GEN_AI_ENVIRONMENT:
154
154
  environment,
155
- SemanticConvetion.GEN_AI_TYPE:
156
- SemanticConvetion.GEN_AI_TYPE_VECTORDB,
155
+ SemanticConvetion.GEN_AI_OPERATION:
156
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
157
157
  SemanticConvetion.DB_OPERATION:
158
158
  db_operation
159
159
  }
@@ -113,8 +113,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
113
113
  self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
114
114
  self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
115
115
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI)
116
- self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
117
- SemanticConvetion.GEN_AI_TYPE_CHAT)
116
+ self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
117
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
118
118
  self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
119
119
  gen_ai_endpoint)
120
120
  self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -141,9 +141,9 @@ def chat(gen_ai_endpoint, version, environment, application_name,
141
141
  self._kwargs.get("seed", ""))
142
142
  self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
143
143
  True)
144
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
144
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
145
145
  prompt_tokens)
146
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
146
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
147
147
  completion_tokens)
148
148
  self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
149
149
  prompt_tokens + completion_tokens)
@@ -175,8 +175,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
175
175
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI,
176
176
  SemanticConvetion.GEN_AI_ENVIRONMENT:
177
177
  environment,
178
- SemanticConvetion.GEN_AI_TYPE:
179
- SemanticConvetion.GEN_AI_TYPE_CHAT,
178
+ SemanticConvetion.GEN_AI_OPERATION:
179
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
180
180
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
181
181
  self._kwargs.get("model", "gpt-3.5-turbo")
182
182
  }
@@ -255,8 +255,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
255
255
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
256
256
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
257
257
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI)
258
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
259
- SemanticConvetion.GEN_AI_TYPE_CHAT)
258
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
259
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
260
260
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
261
261
  gen_ai_endpoint)
262
262
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -298,9 +298,9 @@ def chat(gen_ai_endpoint, version, environment, application_name,
298
298
  pricing_info, response_dict.usage.prompt_tokens,
299
299
  response_dict.usage.completion_tokens)
300
300
 
301
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
301
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
302
302
  response_dict.usage.prompt_tokens)
303
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
303
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
304
304
  response_dict.usage.completion_tokens)
305
305
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
306
306
  response_dict.usage.total_tokens)
@@ -347,9 +347,9 @@ def chat(gen_ai_endpoint, version, environment, application_name,
347
347
  SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
348
348
  },
349
349
  )
350
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
350
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
351
351
  response_dict.usage.prompt_tokens)
352
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
352
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
353
353
  response_dict.usage.completion_tokens)
354
354
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
355
355
  response_dict.usage.total_tokens)
@@ -368,8 +368,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
368
368
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI,
369
369
  SemanticConvetion.GEN_AI_ENVIRONMENT:
370
370
  environment,
371
- SemanticConvetion.GEN_AI_TYPE:
372
- SemanticConvetion.GEN_AI_TYPE_CHAT,
371
+ SemanticConvetion.GEN_AI_OPERATION:
372
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
373
373
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
374
374
  kwargs.get("model", "gpt-3.5-turbo")
375
375
  }
@@ -439,8 +439,8 @@ def embedding(gen_ai_endpoint, version, environment, application_name,
439
439
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
440
440
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
441
441
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI)
442
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
443
- SemanticConvetion.GEN_AI_TYPE_EMBEDDING)
442
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
443
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING)
444
444
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
445
445
  gen_ai_endpoint)
446
446
  span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
@@ -449,13 +449,13 @@ def embedding(gen_ai_endpoint, version, environment, application_name,
449
449
  application_name)
450
450
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
451
451
  kwargs.get("model", "text-embedding-3-large"))
452
- span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_EMBEDDING_FORMAT,
452
+ span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_ENCODING_FORMATS,
453
453
  kwargs.get("encoding_format", "float"))
454
454
  # span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_EMBEDDING_DIMENSION,
455
455
  # kwargs.get("dimensions", "null"))
456
456
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
457
457
  kwargs.get("user", ""))
458
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
458
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
459
459
  response_dict.usage.prompt_tokens)
460
460
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
461
461
  response_dict.usage.total_tokens)
@@ -481,8 +481,8 @@ def embedding(gen_ai_endpoint, version, environment, application_name,
481
481
  SemanticConvetion.GEN_AI_SYSTEM_PREMAI,
482
482
  SemanticConvetion.GEN_AI_ENVIRONMENT:
483
483
  environment,
484
- SemanticConvetion.GEN_AI_TYPE:
485
- SemanticConvetion.GEN_AI_TYPE_EMBEDDING,
484
+ SemanticConvetion.GEN_AI_OPERATION:
485
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING,
486
486
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
487
487
  kwargs.get("model", "text-embedding-ada-002")
488
488
  }
@@ -77,8 +77,8 @@ def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
77
77
  environment)
78
78
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
79
79
  application_name)
80
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
81
- SemanticConvetion.GEN_AI_TYPE_VECTORDB)
80
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
81
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
82
82
  span.set_attribute(SemanticConvetion.DB_SYSTEM,
83
83
  SemanticConvetion.DB_SYSTEM_QDRANT)
84
84
 
@@ -247,8 +247,8 @@ def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
247
247
  SemanticConvetion.DB_SYSTEM_QDRANT,
248
248
  SemanticConvetion.GEN_AI_ENVIRONMENT:
249
249
  environment,
250
- SemanticConvetion.GEN_AI_TYPE:
251
- SemanticConvetion.GEN_AI_TYPE_VECTORDB,
250
+ SemanticConvetion.GEN_AI_OPERATION:
251
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
252
252
  SemanticConvetion.DB_OPERATION:
253
253
  db_operation
254
254
  }
@@ -77,8 +77,8 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
77
77
  environment)
78
78
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
79
79
  application_name)
80
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
81
- SemanticConvetion.GEN_AI_TYPE_VECTORDB)
80
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
81
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
82
82
  span.set_attribute(SemanticConvetion.DB_SYSTEM,
83
83
  SemanticConvetion.DB_SYSTEM_QDRANT)
84
84
 
@@ -254,8 +254,8 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
254
254
  SemanticConvetion.DB_SYSTEM_QDRANT,
255
255
  SemanticConvetion.GEN_AI_ENVIRONMENT:
256
256
  environment,
257
- SemanticConvetion.GEN_AI_TYPE:
258
- SemanticConvetion.GEN_AI_TYPE_VECTORDB,
257
+ SemanticConvetion.GEN_AI_OPERATION:
258
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
259
259
  SemanticConvetion.DB_OPERATION:
260
260
  db_operation
261
261
  }
@@ -77,8 +77,8 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
77
77
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
78
78
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
79
79
  SemanticConvetion.GEN_AI_SYSTEM_REKAAI)
80
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
81
- SemanticConvetion.GEN_AI_TYPE_CHAT)
80
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
81
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
82
82
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
83
83
  gen_ai_endpoint)
84
84
  span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
@@ -111,9 +111,9 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
111
111
  cost = get_chat_model_cost(kwargs.get("model", "reka-core"),
112
112
  pricing_info, prompt_tokens, completion_tokens)
113
113
 
114
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
114
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
115
115
  prompt_tokens)
116
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
116
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
117
117
  completion_tokens)
118
118
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
119
119
  total_tokens)
@@ -134,8 +134,8 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
134
134
  SemanticConvetion.GEN_AI_SYSTEM_REKAAI,
135
135
  SemanticConvetion.GEN_AI_ENVIRONMENT:
136
136
  environment,
137
- SemanticConvetion.GEN_AI_TYPE:
138
- SemanticConvetion.GEN_AI_TYPE_CHAT,
137
+ SemanticConvetion.GEN_AI_OPERATION:
138
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
139
139
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
140
140
  kwargs.get("model", "reka-core")
141
141
  }
@@ -77,8 +77,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
77
77
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
78
78
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
79
79
  SemanticConvetion.GEN_AI_SYSTEM_REKAAI)
80
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
81
- SemanticConvetion.GEN_AI_TYPE_CHAT)
80
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
81
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
82
82
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
83
83
  gen_ai_endpoint)
84
84
  span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
@@ -111,9 +111,9 @@ def chat(gen_ai_endpoint, version, environment, application_name,
111
111
  cost = get_chat_model_cost(kwargs.get("model", "reka-core"),
112
112
  pricing_info, prompt_tokens, completion_tokens)
113
113
 
114
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
114
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
115
115
  prompt_tokens)
116
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
116
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
117
117
  completion_tokens)
118
118
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
119
119
  total_tokens)
@@ -134,8 +134,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
134
134
  SemanticConvetion.GEN_AI_SYSTEM_REKAAI,
135
135
  SemanticConvetion.GEN_AI_ENVIRONMENT:
136
136
  environment,
137
- SemanticConvetion.GEN_AI_TYPE:
138
- SemanticConvetion.GEN_AI_TYPE_CHAT,
137
+ SemanticConvetion.GEN_AI_OPERATION:
138
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
139
139
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
140
140
  kwargs.get("model", "reka-core")
141
141
  }
@@ -129,8 +129,8 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
129
129
  self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
130
130
  self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
131
131
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
132
- self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
133
- SemanticConvetion.GEN_AI_TYPE_CHAT)
132
+ self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
133
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
134
134
  self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
135
135
  gen_ai_endpoint)
136
136
  self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -160,9 +160,9 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
160
160
  self._kwargs.get("seed", ""))
161
161
  self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
162
162
  True)
163
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
163
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
164
164
  self._prompt_tokens)
165
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
165
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
166
166
  self._completion_tokens)
167
167
  self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
168
168
  self._total_tokens)
@@ -194,8 +194,8 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
194
194
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
195
195
  SemanticConvetion.GEN_AI_ENVIRONMENT:
196
196
  environment,
197
- SemanticConvetion.GEN_AI_TYPE:
198
- SemanticConvetion.GEN_AI_TYPE_CHAT,
197
+ SemanticConvetion.GEN_AI_OPERATION:
198
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
199
199
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
200
200
  self._kwargs.get("model",
201
201
  "meta-llama/Llama-3.3-70B-Instruct-Turbo")
@@ -280,8 +280,8 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
280
280
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
281
281
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
282
282
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
283
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
284
- SemanticConvetion.GEN_AI_TYPE_CHAT)
283
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
284
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
285
285
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
286
286
  gen_ai_endpoint)
287
287
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -328,9 +328,9 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
328
328
  response_dict.get('usage', {}).get('prompt_tokens', None),
329
329
  response_dict.get('usage', {}).get('completion_tokens', None))
330
330
 
331
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
331
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
332
332
  response_dict.get('usage', {}).get('prompt_tokens', None))
333
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
333
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
334
334
  response_dict.get('usage', {}).get('completion_tokens', None))
335
335
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
336
336
  response_dict.get('usage', {}).get('total_tokens', None))
@@ -380,9 +380,9 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
380
380
  SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
381
381
  },
382
382
  )
383
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
383
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
384
384
  response_dict.get('usage').get('prompt_tokens'))
385
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
385
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
386
386
  response_dict.get('usage').get('completion_tokens'))
387
387
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
388
388
  response_dict.get('usage').get('total_tokens'))
@@ -401,8 +401,8 @@ def async_completion(gen_ai_endpoint, version, environment, application_name,
401
401
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
402
402
  SemanticConvetion.GEN_AI_ENVIRONMENT:
403
403
  environment,
404
- SemanticConvetion.GEN_AI_TYPE:
405
- SemanticConvetion.GEN_AI_TYPE_CHAT,
404
+ SemanticConvetion.GEN_AI_OPERATION:
405
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
406
406
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
407
407
  kwargs.get("model", "meta-llama/Llama-3.3-70B-Instruct-Turbo")
408
408
  }
@@ -489,8 +489,8 @@ def async_image_generate(gen_ai_endpoint, version, environment, application_name
489
489
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
490
490
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
491
491
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
492
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
493
- SemanticConvetion.GEN_AI_TYPE_IMAGE)
492
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
493
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_IMAGE)
494
494
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
495
495
  gen_ai_endpoint)
496
496
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -536,8 +536,8 @@ def async_image_generate(gen_ai_endpoint, version, environment, application_name
536
536
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
537
537
  SemanticConvetion.GEN_AI_ENVIRONMENT:
538
538
  environment,
539
- SemanticConvetion.GEN_AI_TYPE:
540
- SemanticConvetion.GEN_AI_TYPE_IMAGE,
539
+ SemanticConvetion.GEN_AI_OPERATION:
540
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_IMAGE,
541
541
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
542
542
  kwargs.get("model", "black-forest-labs/FLUX.1-dev")
543
543
  }
@@ -129,8 +129,8 @@ def completion(gen_ai_endpoint, version, environment, application_name,
129
129
  self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
130
130
  self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
131
131
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
132
- self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
133
- SemanticConvetion.GEN_AI_TYPE_CHAT)
132
+ self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
133
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
134
134
  self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
135
135
  gen_ai_endpoint)
136
136
  self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -160,9 +160,9 @@ def completion(gen_ai_endpoint, version, environment, application_name,
160
160
  self._kwargs.get("seed", ""))
161
161
  self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
162
162
  True)
163
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
163
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
164
164
  self._prompt_tokens)
165
- self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
165
+ self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
166
166
  self._completion_tokens)
167
167
  self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
168
168
  self._total_tokens)
@@ -194,8 +194,8 @@ def completion(gen_ai_endpoint, version, environment, application_name,
194
194
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
195
195
  SemanticConvetion.GEN_AI_ENVIRONMENT:
196
196
  environment,
197
- SemanticConvetion.GEN_AI_TYPE:
198
- SemanticConvetion.GEN_AI_TYPE_CHAT,
197
+ SemanticConvetion.GEN_AI_OPERATION:
198
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
199
199
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
200
200
  self._kwargs.get("model",
201
201
  "meta-llama/Llama-3.3-70B-Instruct-Turbo")
@@ -280,8 +280,8 @@ def completion(gen_ai_endpoint, version, environment, application_name,
280
280
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
281
281
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
282
282
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
283
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
284
- SemanticConvetion.GEN_AI_TYPE_CHAT)
283
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
284
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
285
285
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
286
286
  gen_ai_endpoint)
287
287
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -328,9 +328,9 @@ def completion(gen_ai_endpoint, version, environment, application_name,
328
328
  response_dict.get('usage', {}).get('prompt_tokens', None),
329
329
  response_dict.get('usage', {}).get('completion_tokens', None))
330
330
 
331
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
331
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
332
332
  response_dict.get('usage', {}).get('prompt_tokens', None))
333
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
333
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
334
334
  response_dict.get('usage', {}).get('completion_tokens', None))
335
335
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
336
336
  response_dict.get('usage', {}).get('total_tokens', None))
@@ -380,9 +380,9 @@ def completion(gen_ai_endpoint, version, environment, application_name,
380
380
  SemanticConvetion.GEN_AI_CONTENT_COMPLETION: "Function called with tools",
381
381
  },
382
382
  )
383
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
383
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
384
384
  response_dict.get('usage').get('prompt_tokens'))
385
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
385
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
386
386
  response_dict.get('usage').get('completion_tokens'))
387
387
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
388
388
  response_dict.get('usage').get('total_tokens'))
@@ -401,8 +401,8 @@ def completion(gen_ai_endpoint, version, environment, application_name,
401
401
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
402
402
  SemanticConvetion.GEN_AI_ENVIRONMENT:
403
403
  environment,
404
- SemanticConvetion.GEN_AI_TYPE:
405
- SemanticConvetion.GEN_AI_TYPE_CHAT,
404
+ SemanticConvetion.GEN_AI_OPERATION:
405
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
406
406
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
407
407
  kwargs.get("model", "meta-llama/Llama-3.3-70B-Instruct-Turbo")
408
408
  }
@@ -489,8 +489,8 @@ def image_generate(gen_ai_endpoint, version, environment, application_name,
489
489
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
490
490
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
491
491
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER)
492
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
493
- SemanticConvetion.GEN_AI_TYPE_IMAGE)
492
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
493
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_IMAGE)
494
494
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
495
495
  gen_ai_endpoint)
496
496
  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
@@ -536,8 +536,8 @@ def image_generate(gen_ai_endpoint, version, environment, application_name,
536
536
  SemanticConvetion.GEN_AI_SYSTEM_TOGETHER,
537
537
  SemanticConvetion.GEN_AI_ENVIRONMENT:
538
538
  environment,
539
- SemanticConvetion.GEN_AI_TYPE:
540
- SemanticConvetion.GEN_AI_TYPE_IMAGE,
539
+ SemanticConvetion.GEN_AI_OPERATION:
540
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_IMAGE,
541
541
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
542
542
  kwargs.get("model", "black-forest-labs/FLUX.1-dev")
543
543
  }
@@ -78,8 +78,8 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
78
78
  environment)
79
79
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
80
80
  application_name)
81
- span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
82
- SemanticConvetion.GEN_AI_TYPE_CHAT)
81
+ span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
82
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
83
83
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
84
84
  instance.model.config.name_or_path)
85
85
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
@@ -95,7 +95,7 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
95
95
  SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
96
96
  },
97
97
  )
98
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
98
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
99
99
  prompt_tokens)
100
100
 
101
101
  i = 0
@@ -122,7 +122,7 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
122
122
  completion_tokens += general_tokens(llm_response)
123
123
 
124
124
  i=i+1
125
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
125
+ span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
126
126
  completion_tokens)
127
127
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
128
128
  prompt_tokens + completion_tokens)
@@ -138,8 +138,8 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
138
138
  SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE,
139
139
  SemanticConvetion.GEN_AI_ENVIRONMENT:
140
140
  environment,
141
- SemanticConvetion.GEN_AI_TYPE:
142
- SemanticConvetion.GEN_AI_TYPE_CHAT,
141
+ SemanticConvetion.GEN_AI_OPERATION:
142
+ SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
143
143
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
144
144
  instance.model.config.name_or_path
145
145
  }