openlit 1.33.19__py3-none-any.whl → 1.33.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. openlit/__helpers.py +7 -7
  2. openlit/__init__.py +3 -3
  3. openlit/evals/utils.py +7 -7
  4. openlit/guard/utils.py +7 -7
  5. openlit/instrumentation/ag2/ag2.py +24 -24
  6. openlit/instrumentation/ai21/ai21.py +3 -3
  7. openlit/instrumentation/ai21/async_ai21.py +3 -3
  8. openlit/instrumentation/ai21/utils.py +59 -59
  9. openlit/instrumentation/anthropic/anthropic.py +2 -2
  10. openlit/instrumentation/anthropic/async_anthropic.py +2 -2
  11. openlit/instrumentation/anthropic/utils.py +34 -34
  12. openlit/instrumentation/assemblyai/assemblyai.py +24 -24
  13. openlit/instrumentation/astra/astra.py +3 -3
  14. openlit/instrumentation/astra/async_astra.py +3 -3
  15. openlit/instrumentation/astra/utils.py +39 -39
  16. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +2 -2
  17. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +2 -2
  18. openlit/instrumentation/azure_ai_inference/utils.py +36 -36
  19. openlit/instrumentation/bedrock/bedrock.py +2 -2
  20. openlit/instrumentation/bedrock/utils.py +35 -35
  21. openlit/instrumentation/chroma/chroma.py +57 -57
  22. openlit/instrumentation/cohere/async_cohere.py +88 -88
  23. openlit/instrumentation/cohere/cohere.py +88 -88
  24. openlit/instrumentation/controlflow/controlflow.py +15 -15
  25. openlit/instrumentation/crawl4ai/async_crawl4ai.py +14 -14
  26. openlit/instrumentation/crawl4ai/crawl4ai.py +14 -14
  27. openlit/instrumentation/crewai/crewai.py +22 -22
  28. openlit/instrumentation/dynamiq/dynamiq.py +19 -19
  29. openlit/instrumentation/elevenlabs/async_elevenlabs.py +24 -25
  30. openlit/instrumentation/elevenlabs/elevenlabs.py +23 -25
  31. openlit/instrumentation/embedchain/embedchain.py +15 -15
  32. openlit/instrumentation/firecrawl/firecrawl.py +10 -10
  33. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +33 -33
  34. openlit/instrumentation/google_ai_studio/google_ai_studio.py +33 -33
  35. openlit/instrumentation/gpt4all/gpt4all.py +78 -78
  36. openlit/instrumentation/gpu/__init__.py +8 -8
  37. openlit/instrumentation/groq/async_groq.py +74 -74
  38. openlit/instrumentation/groq/groq.py +74 -74
  39. openlit/instrumentation/haystack/haystack.py +6 -6
  40. openlit/instrumentation/julep/async_julep.py +14 -14
  41. openlit/instrumentation/julep/julep.py +14 -14
  42. openlit/instrumentation/langchain/async_langchain.py +39 -39
  43. openlit/instrumentation/langchain/langchain.py +39 -39
  44. openlit/instrumentation/letta/letta.py +26 -26
  45. openlit/instrumentation/litellm/async_litellm.py +94 -94
  46. openlit/instrumentation/litellm/litellm.py +94 -94
  47. openlit/instrumentation/llamaindex/llamaindex.py +7 -7
  48. openlit/instrumentation/mem0/mem0.py +13 -13
  49. openlit/instrumentation/milvus/milvus.py +47 -47
  50. openlit/instrumentation/mistral/async_mistral.py +88 -88
  51. openlit/instrumentation/mistral/mistral.py +88 -88
  52. openlit/instrumentation/multion/async_multion.py +21 -21
  53. openlit/instrumentation/multion/multion.py +21 -21
  54. openlit/instrumentation/ollama/async_ollama.py +3 -3
  55. openlit/instrumentation/ollama/ollama.py +3 -3
  56. openlit/instrumentation/ollama/utils.py +50 -50
  57. openlit/instrumentation/openai/async_openai.py +225 -225
  58. openlit/instrumentation/openai/openai.py +225 -225
  59. openlit/instrumentation/openai_agents/openai_agents.py +11 -11
  60. openlit/instrumentation/phidata/phidata.py +15 -15
  61. openlit/instrumentation/pinecone/pinecone.py +43 -43
  62. openlit/instrumentation/premai/premai.py +86 -86
  63. openlit/instrumentation/qdrant/async_qdrant.py +95 -95
  64. openlit/instrumentation/qdrant/qdrant.py +99 -99
  65. openlit/instrumentation/reka/async_reka.py +33 -33
  66. openlit/instrumentation/reka/reka.py +33 -33
  67. openlit/instrumentation/together/async_together.py +90 -90
  68. openlit/instrumentation/together/together.py +90 -90
  69. openlit/instrumentation/transformers/transformers.py +26 -26
  70. openlit/instrumentation/vertexai/async_vertexai.py +64 -64
  71. openlit/instrumentation/vertexai/vertexai.py +64 -64
  72. openlit/instrumentation/vllm/vllm.py +24 -24
  73. openlit/otel/metrics.py +11 -11
  74. openlit/semcov/__init__.py +3 -3
  75. {openlit-1.33.19.dist-info → openlit-1.33.20.dist-info}/METADATA +8 -8
  76. openlit-1.33.20.dist-info/RECORD +131 -0
  77. {openlit-1.33.19.dist-info → openlit-1.33.20.dist-info}/WHEEL +1 -1
  78. openlit-1.33.19.dist-info/RECORD +0 -131
  79. {openlit-1.33.19.dist-info → openlit-1.33.20.dist-info}/LICENSE +0 -0
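
The recurring change across these files is a rename of the misspelled SemanticConvetion class in openlit.semcov to SemanticConvention, with call sites updated to match. Below is a minimal sketch of what that rename looks like for code that imports the class directly; the set_chat_attributes helper is hypothetical and only illustrates the pattern visible in the hunks that follow:

# Before (openlit 1.33.19)
from openlit.semcov import SemanticConvetion

# After (openlit 1.33.20)
from openlit.semcov import SemanticConvention

def set_chat_attributes(span, request_model):
    # Hypothetical helper mirroring the attribute names used in the diff below
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
                       SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)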
@@ -16,7 +16,7 @@ from openlit.__helpers import (
     otel_event,
     concatenate_all_contents
 )
-from openlit.semcov import SemanticConvetion
+from openlit.semcov import SemanticConvention
 
 def process_chunk(self, chunk):
     """
@@ -76,20 +76,20 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
 
     # Set Span attributes (OTel Semconv)
     scope._span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
-    scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvention.SERVER_PORT, scope._server_port)
 
     # List of attributes and their config keys
     attributes = [
-        (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'frequencyPenalty'),
-        (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'maxTokens'),
-        (SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY, 'presencePenalty'),
-        (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stopSequences'),
-        (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
-        (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'topP'),
-        (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'topK'),
+        (SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'frequencyPenalty'),
+        (SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, 'maxTokens'),
+        (SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, 'presencePenalty'),
+        (SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, 'stopSequences'),
+        (SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
+        (SemanticConvention.GEN_AI_REQUEST_TOP_P, 'topP'),
+        (SemanticConvention.GEN_AI_REQUEST_TOP_K, 'topK'),
     ]
 
     # Set each attribute if the corresponding value exists and is not None
@@ -98,38 +98,38 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
         if value is not None:
             scope._span.set_attribute(attribute, value)
 
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
-    scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, scope._server_address)
 
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
                               'text' if isinstance(scope._llmresponse, str) else 'json')
 
     scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
     scope._span.set_attribute(SERVICE_NAME, application_name)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
-    scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
 
     # To be removed one the change to log events (from span events) is complete
     prompt = concatenate_all_contents(formatted_messages)
     if capture_message_content:
         scope._span.add_event(
-            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
             attributes={
-                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
             },
         )
         scope._span.add_event(
-            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
             attributes={
-                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
             },
         )
 
@@ -146,9 +146,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
        for role in ['user', 'system', 'assistant', 'tool']:
            if formatted_messages.get(role, {}).get('content', ''):
                event = otel_event(
-                    name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+                    name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
                    attributes={
-                        SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK
+                        SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK
                    },
                    body = {
                        # pylint: disable=line-too-long
@@ -173,9 +173,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
                event_provider.emit(event)
 
    choice_event = otel_event(
-        name=SemanticConvetion.GEN_AI_CHOICE,
+        name=SemanticConvention.GEN_AI_CHOICE,
        attributes={
-            SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK
+            SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK
        },
        body=choice_event_body
    )
@@ -187,8 +187,8 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
        metrics_attributes = create_metrics_attributes(
            service_name=application_name,
            deployment_environment=environment,
-            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-            system=SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK,
+            operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+            system=SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
            request_model=request_model,
            server_address=scope._server_address,
            server_port=scope._server_port,
@@ -7,7 +7,7 @@ import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
 from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import handle_exception
-from openlit.semcov import SemanticConvetion
+from openlit.semcov import SemanticConvention
 
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
@@ -71,101 +71,101 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
 
            try:
                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT,
                                    gen_ai_endpoint)
                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                                    environment)
                span.set_attribute(SERVICE_NAME,
                                    application_name)
-                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
-                span.set_attribute(SemanticConvetion.DB_SYSTEM_NAME,
-                                    SemanticConvetion.DB_SYSTEM_CHROMA)
-                span.set_attribute(SemanticConvetion.DB_COLLECTION_NAME,
+                span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
+                                    SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB)
+                span.set_attribute(SemanticConvention.DB_SYSTEM_NAME,
+                                    SemanticConvention.DB_SYSTEM_CHROMA)
+                span.set_attribute(SemanticConvention.DB_COLLECTION_NAME,
                                    instance.name)
 
                if gen_ai_endpoint == "chroma.add":
-                    db_operation = SemanticConvetion.DB_OPERATION_ADD
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_ADD)
-                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
+                    db_operation = SemanticConvention.DB_OPERATION_ADD
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_ADD)
+                    span.set_attribute(SemanticConvention.DB_ID_COUNT,
                                        object_count(kwargs.get("ids", [])))
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("embeddings", [])))
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("metadatas", [])))
-                    span.set_attribute(SemanticConvetion.DB_DOCUMENTS_COUNT,
+                    span.set_attribute(SemanticConvention.DB_DOCUMENTS_COUNT,
                                        object_count(kwargs.get("documents", [])))
 
                elif gen_ai_endpoint == "chroma.get":
-                    db_operation = SemanticConvetion.DB_OPERATION_GET
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_GET)
-                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
+                    db_operation = SemanticConvention.DB_OPERATION_GET
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_GET)
+                    span.set_attribute(SemanticConvention.DB_ID_COUNT,
                                        object_count(kwargs.get("ids", [])))
-                    span.set_attribute(SemanticConvetion.DB_QUERY_LIMIT,
+                    span.set_attribute(SemanticConvention.DB_QUERY_LIMIT,
                                        kwargs.get("limit", ""))
-                    span.set_attribute(SemanticConvetion.DB_OFFSET,
+                    span.set_attribute(SemanticConvention.DB_OFFSET,
                                        kwargs.get("offset", ""))
-                    span.set_attribute(SemanticConvetion.DB_WHERE_DOCUMENT,
+                    span.set_attribute(SemanticConvention.DB_WHERE_DOCUMENT,
                                        str(kwargs.get("where_document", "")))
 
                elif gen_ai_endpoint == "chroma.query":
-                    db_operation = SemanticConvetion.DB_OPERATION_QUERY
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_QUERY)
-                    span.set_attribute(SemanticConvetion.DB_STATEMENT,
+                    db_operation = SemanticConvention.DB_OPERATION_QUERY
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_QUERY)
+                    span.set_attribute(SemanticConvention.DB_STATEMENT,
                                        str(kwargs.get("query_texts", "")))
-                    span.set_attribute(SemanticConvetion.DB_N_RESULTS,
+                    span.set_attribute(SemanticConvention.DB_N_RESULTS,
                                        kwargs.get("n_results", ""))
-                    span.set_attribute(SemanticConvetion.DB_FILTER,
+                    span.set_attribute(SemanticConvention.DB_FILTER,
                                        str(kwargs.get("where", "")))
-                    span.set_attribute(SemanticConvetion.DB_WHERE_DOCUMENT,
+                    span.set_attribute(SemanticConvention.DB_WHERE_DOCUMENT,
                                        str(kwargs.get("where_document", "")))
 
                elif gen_ai_endpoint == "chroma.update":
-                    db_operation = SemanticConvetion.DB_OPERATION_UPDATE
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_UPDATE)
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    db_operation = SemanticConvention.DB_OPERATION_UPDATE
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_UPDATE)
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("embeddings", [])))
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("metadatas", [])))
-                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
+                    span.set_attribute(SemanticConvention.DB_ID_COUNT,
                                        object_count(kwargs.get("ids", [])))
-                    span.set_attribute(SemanticConvetion.DB_DOCUMENTS_COUNT,
+                    span.set_attribute(SemanticConvention.DB_DOCUMENTS_COUNT,
                                        object_count(kwargs.get("documents", [])))
 
                elif gen_ai_endpoint == "chroma.upsert":
-                    db_operation = SemanticConvetion.DB_OPERATION_UPSERT
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_UPSERT)
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    db_operation = SemanticConvention.DB_OPERATION_UPSERT
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_UPSERT)
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("embeddings", [])))
-                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
+                    span.set_attribute(SemanticConvention.DB_VECTOR_COUNT,
                                        object_count(kwargs.get("metadatas", [])))
-                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
+                    span.set_attribute(SemanticConvention.DB_ID_COUNT,
                                        object_count(kwargs.get("ids", [])))
-                    span.set_attribute(SemanticConvetion.DB_DOCUMENTS_COUNT,
+                    span.set_attribute(SemanticConvention.DB_DOCUMENTS_COUNT,
                                        object_count(kwargs.get("documents", [])))
 
                elif gen_ai_endpoint == "chroma.delete":
-                    db_operation = SemanticConvetion.DB_OPERATION_DELETE
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_DELETE)
-                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
+                    db_operation = SemanticConvention.DB_OPERATION_DELETE
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_DELETE)
+                    span.set_attribute(SemanticConvention.DB_ID_COUNT,
                                        object_count(kwargs.get("ids", [])))
-                    span.set_attribute(SemanticConvetion.DB_FILTER,
+                    span.set_attribute(SemanticConvention.DB_FILTER,
                                        str(kwargs.get("where", "")))
-                    span.set_attribute(SemanticConvetion.DB_DELETE_ALL,
+                    span.set_attribute(SemanticConvention.DB_DELETE_ALL,
                                        kwargs.get("delete_all", False))
-                    span.set_attribute(SemanticConvetion.DB_WHERE_DOCUMENT,
+                    span.set_attribute(SemanticConvention.DB_WHERE_DOCUMENT,
                                        str(kwargs.get("where_document", "")))
 
                elif gen_ai_endpoint == "chroma.peek":
-                    db_operation = SemanticConvetion.DB_OPERATION_PEEK
-                    span.set_attribute(SemanticConvetion.DB_OPERATION_NAME,
-                                        SemanticConvetion.DB_OPERATION_PEEK)
+                    db_operation = SemanticConvention.DB_OPERATION_PEEK
+                    span.set_attribute(SemanticConvention.DB_OPERATION_NAME,
+                                        SemanticConvention.DB_OPERATION_PEEK)
 
                span.set_status(Status(StatusCode.OK))
 
@@ -175,13 +175,13 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
                            "openlit",
                        SERVICE_NAME:
                            application_name,
-                        SemanticConvetion.DB_SYSTEM_NAME:
-                            SemanticConvetion.DB_SYSTEM_CHROMA,
+                        SemanticConvention.DB_SYSTEM_NAME:
+                            SemanticConvention.DB_SYSTEM_CHROMA,
                        DEPLOYMENT_ENVIRONMENT:
                            environment,
-                        SemanticConvetion.GEN_AI_OPERATION:
-                            SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
-                        SemanticConvetion.DB_OPERATION_NAME:
+                        SemanticConvention.GEN_AI_OPERATION:
+                            SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB,
+                        SemanticConvention.DB_OPERATION_NAME:
                            db_operation
                    }
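
For context, here is a minimal usage sketch of how the Chroma attributes above get emitted in practice. It assumes the documented openlit.init() entry point and a local Chroma client; the application name and collection contents are illustrative only:

import chromadb
import openlit

# Assumption: init() registers the Chroma instrumentor along with the others
openlit.init(application_name="demo-app", environment="dev")

client = chromadb.Client()
collection = client.create_collection(name="docs")

# Wrapped as the "chroma.add" endpoint; the resulting span should carry
# SemanticConvention.DB_OPERATION_NAME = DB_OPERATION_ADD plus the id/document counts
collection.add(ids=["1"], documents=["hello world"])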