openlit 1.33.18__py3-none-any.whl → 1.33.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. openlit/__helpers.py +11 -41
  2. openlit/__init__.py +3 -3
  3. openlit/evals/utils.py +7 -7
  4. openlit/guard/utils.py +7 -7
  5. openlit/instrumentation/ag2/ag2.py +24 -24
  6. openlit/instrumentation/ai21/ai21.py +3 -3
  7. openlit/instrumentation/ai21/async_ai21.py +3 -3
  8. openlit/instrumentation/ai21/utils.py +59 -59
  9. openlit/instrumentation/anthropic/anthropic.py +2 -2
  10. openlit/instrumentation/anthropic/async_anthropic.py +2 -2
  11. openlit/instrumentation/anthropic/utils.py +34 -34
  12. openlit/instrumentation/assemblyai/assemblyai.py +24 -24
  13. openlit/instrumentation/astra/astra.py +3 -3
  14. openlit/instrumentation/astra/async_astra.py +3 -3
  15. openlit/instrumentation/astra/utils.py +39 -39
  16. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +10 -10
  17. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +10 -10
  18. openlit/instrumentation/azure_ai_inference/utils.py +38 -38
  19. openlit/instrumentation/bedrock/__init__.py +2 -1
  20. openlit/instrumentation/bedrock/bedrock.py +32 -214
  21. openlit/instrumentation/bedrock/utils.py +252 -0
  22. openlit/instrumentation/chroma/chroma.py +57 -57
  23. openlit/instrumentation/cohere/async_cohere.py +88 -88
  24. openlit/instrumentation/cohere/cohere.py +88 -88
  25. openlit/instrumentation/controlflow/controlflow.py +15 -15
  26. openlit/instrumentation/crawl4ai/async_crawl4ai.py +14 -14
  27. openlit/instrumentation/crawl4ai/crawl4ai.py +14 -14
  28. openlit/instrumentation/crewai/crewai.py +22 -22
  29. openlit/instrumentation/dynamiq/dynamiq.py +19 -19
  30. openlit/instrumentation/elevenlabs/async_elevenlabs.py +24 -25
  31. openlit/instrumentation/elevenlabs/elevenlabs.py +23 -25
  32. openlit/instrumentation/embedchain/embedchain.py +15 -15
  33. openlit/instrumentation/firecrawl/firecrawl.py +10 -10
  34. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +33 -33
  35. openlit/instrumentation/google_ai_studio/google_ai_studio.py +33 -33
  36. openlit/instrumentation/gpt4all/gpt4all.py +78 -78
  37. openlit/instrumentation/gpu/__init__.py +8 -8
  38. openlit/instrumentation/groq/async_groq.py +74 -74
  39. openlit/instrumentation/groq/groq.py +74 -74
  40. openlit/instrumentation/haystack/haystack.py +6 -6
  41. openlit/instrumentation/julep/async_julep.py +14 -14
  42. openlit/instrumentation/julep/julep.py +14 -14
  43. openlit/instrumentation/langchain/async_langchain.py +39 -39
  44. openlit/instrumentation/langchain/langchain.py +39 -39
  45. openlit/instrumentation/letta/letta.py +26 -26
  46. openlit/instrumentation/litellm/async_litellm.py +94 -94
  47. openlit/instrumentation/litellm/litellm.py +94 -94
  48. openlit/instrumentation/llamaindex/llamaindex.py +7 -7
  49. openlit/instrumentation/mem0/mem0.py +13 -13
  50. openlit/instrumentation/milvus/milvus.py +47 -47
  51. openlit/instrumentation/mistral/async_mistral.py +88 -88
  52. openlit/instrumentation/mistral/mistral.py +88 -88
  53. openlit/instrumentation/multion/async_multion.py +21 -21
  54. openlit/instrumentation/multion/multion.py +21 -21
  55. openlit/instrumentation/ollama/async_ollama.py +3 -3
  56. openlit/instrumentation/ollama/ollama.py +3 -3
  57. openlit/instrumentation/ollama/utils.py +50 -50
  58. openlit/instrumentation/openai/async_openai.py +225 -225
  59. openlit/instrumentation/openai/openai.py +225 -225
  60. openlit/instrumentation/openai_agents/openai_agents.py +11 -11
  61. openlit/instrumentation/phidata/phidata.py +15 -15
  62. openlit/instrumentation/pinecone/pinecone.py +43 -43
  63. openlit/instrumentation/premai/premai.py +86 -86
  64. openlit/instrumentation/qdrant/async_qdrant.py +95 -95
  65. openlit/instrumentation/qdrant/qdrant.py +99 -99
  66. openlit/instrumentation/reka/async_reka.py +33 -33
  67. openlit/instrumentation/reka/reka.py +33 -33
  68. openlit/instrumentation/together/async_together.py +90 -90
  69. openlit/instrumentation/together/together.py +90 -90
  70. openlit/instrumentation/transformers/transformers.py +26 -26
  71. openlit/instrumentation/vertexai/async_vertexai.py +64 -64
  72. openlit/instrumentation/vertexai/vertexai.py +64 -64
  73. openlit/instrumentation/vllm/vllm.py +24 -24
  74. openlit/otel/metrics.py +11 -11
  75. openlit/semcov/__init__.py +3 -3
  76. {openlit-1.33.18.dist-info → openlit-1.33.20.dist-info}/METADATA +8 -8
  77. openlit-1.33.20.dist-info/RECORD +131 -0
  78. {openlit-1.33.18.dist-info → openlit-1.33.20.dist-info}/WHEEL +1 -1
  79. openlit-1.33.18.dist-info/RECORD +0 -130
  80. {openlit-1.33.18.dist-info → openlit-1.33.20.dist-info}/LICENSE +0 -0
@@ -16,7 +16,7 @@ from openlit.__helpers import (
  otel_event,
  concatenate_all_contents
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  def process_chunk(self, chunk):
  """
@@ -75,47 +75,47 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric

  # Set Span attributes (OTel Semconv)
  scope._span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
- scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
- scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get('max_tokens', -1))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop_sequences', []))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
- scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
-
- scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+ scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+ scope._span.set_attribute(SemanticConvention.SERVER_PORT, scope._server_port)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get('max_tokens', -1))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop_sequences', []))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+ scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, scope._server_address)
+
+ scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
  'text' if isinstance(scope._llmresponse, str) else 'json')

  scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
  scope._span.set_attribute(SERVICE_NAME, application_name)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

  # To be removed one the change to log events (from span events) is complete
  prompt = concatenate_all_contents(formatted_messages)
  if capture_message_content:
  scope._span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+ SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
  },
  )
  scope._span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+ SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
  },
  )

@@ -144,9 +144,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  for role in ['user', 'system', 'assistant', 'tool']:
  if formatted_messages.get(role, {}).get('content', ''):
  event = otel_event(
- name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+ name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC
  },
  body = {
  # pylint: disable=line-too-long
@@ -171,9 +171,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  event_provider.emit(event)

  choice_event = otel_event(
- name=SemanticConvetion.GEN_AI_CHOICE,
+ name=SemanticConvention.GEN_AI_CHOICE,
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC
  },
  body=choice_event_body
  )
@@ -185,8 +185,8 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  metrics_attributes = create_metrics_attributes(
  service_name=application_name,
  deployment_environment=environment,
- operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
- system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
+ operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+ system=SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC,
  request_model=request_model,
  server_address=scope._server_address,
  server_port=scope._server_port,
@@ -13,7 +13,7 @@ from openlit.__helpers import (
  set_server_address_and_port,
  otel_event
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  # Initialize logger for logging potential issues and operations
  logger = logging.getLogger(__name__)
@@ -33,7 +33,7 @@ def transcribe(version, environment, application_name,
  server_address, server_port = set_server_address_and_port(instance, 'api.assemblyai.com', 443)
  request_model = kwargs.get('speech_model', 'best')

- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}'
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}'

  with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
  start_time = time.time()
@@ -47,19 +47,19 @@ def transcribe(version, environment, application_name,

  # Set Span attributes (OTel Semconv)
  span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
- span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
- SemanticConvetion.GEN_AI_OPERATION_TYPE_AUDIO)
- span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
- SemanticConvetion.GEN_AI_SYSTEM_ASSEMBLYAI)
- span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+ span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
+ SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO)
+ span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
+ SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI)
+ span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
  request_model)
- span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
+ span.set_attribute(SemanticConvention.SERVER_ADDRESS,
  server_address)
- span.set_attribute(SemanticConvetion.SERVER_PORT,
+ span.set_attribute(SemanticConvention.SERVER_PORT,
  server_port)
- span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
+ span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,
  request_model)
- span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+ span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
  'text')

  # Set Span attributes (Extras)
@@ -67,32 +67,32 @@ def transcribe(version, environment, application_name,
  environment)
  span.set_attribute(SERVICE_NAME,
  application_name)
- span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_AUDIO_DURATION,
+ span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_DURATION,
  response.audio_duration)
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST,
  cost)
- span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
+ span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION,
  version)

  # To be removed one the change to log events (from span events) is complete
  if capture_message_content:
  span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_PROMPT: response.audio_url,
+ SemanticConvention.GEN_AI_CONTENT_PROMPT: response.audio_url,
  },
  )
  span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.text,
+ SemanticConvention.GEN_AI_CONTENT_COMPLETION: response.text,
  },
  )

  input_event = otel_event(
- name=SemanticConvetion.GEN_AI_USER_MESSAGE,
+ name=SemanticConvention.GEN_AI_USER_MESSAGE,
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ASSEMBLYAI
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI
  },
  body={
  **({'content': response.audio_url} if capture_message_content else {}),
@@ -102,9 +102,9 @@ def transcribe(version, environment, application_name,
  event_provider.emit(input_event)

  output_event = otel_event(
- name=SemanticConvetion.GEN_AI_CHOICE,
+ name=SemanticConvention.GEN_AI_CHOICE,
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ASSEMBLYAI
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI
  },
  body={
  'finish_reason': 'stop',
@@ -123,8 +123,8 @@ def transcribe(version, environment, application_name,
  attributes = create_metrics_attributes(
  service_name=application_name,
  deployment_environment=environment,
- operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_AUDIO,
- system=SemanticConvetion.GEN_AI_SYSTEM_ASSEMBLYAI,
+ operation=SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
+ system=SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI,
  request_model=request_model,
  server_address=server_address,
  server_port=server_port,
@@ -8,7 +8,7 @@ from openlit.instrumentation.astra.utils import (
  DB_OPERATION_MAP,
  process_db_operations
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  def general_wrap(gen_ai_endpoint, version, environment, application_name,
  tracer, pricing_info, capture_message_content, metrics, disable_metrics):
@@ -22,8 +22,8 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
  """

  db_operation = DB_OPERATION_MAP.get(gen_ai_endpoint, "UNKNOWN")
- if db_operation == SemanticConvetion.DB_OPERATION_REPLACE and kwargs.get('upsert'):
- db_operation = SemanticConvetion.DB_OPERATION_UPSERT
+ if db_operation == SemanticConvention.DB_OPERATION_REPLACE and kwargs.get('upsert'):
+ db_operation = SemanticConvention.DB_OPERATION_UPSERT

  span_name = f"{db_operation} {instance.name}"

@@ -8,7 +8,7 @@ from openlit.instrumentation.astra.utils import (
  DB_OPERATION_MAP,
  process_db_operations
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
  tracer, pricing_info, capture_message_content, metrics, disable_metrics):
@@ -22,8 +22,8 @@ def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
  """

  db_operation = DB_OPERATION_MAP.get(gen_ai_endpoint, "UNKNOWN")
- if db_operation == SemanticConvetion.DB_OPERATION_REPLACE and kwargs.get('upsert'):
- db_operation = SemanticConvetion.DB_OPERATION_UPSERT
+ if db_operation == SemanticConvention.DB_OPERATION_REPLACE and kwargs.get('upsert'):
+ db_operation = SemanticConvention.DB_OPERATION_UPSERT

  span_name = f"{db_operation} {instance.name}"

@@ -7,7 +7,7 @@ import logging
  from opentelemetry.trace import Status, StatusCode
  from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import handle_exception
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  # Initialize logger for logging potential issues and operations
  logger = logging.getLogger(__name__)
@@ -17,15 +17,15 @@ def object_count(obj):
  return len(obj) if isinstance(obj, list) else 1

  DB_OPERATION_MAP = {
- 'astra.create_collection': SemanticConvetion.DB_OPERATION_CREATE_COLLECTION,
- 'astra.drop_collection': SemanticConvetion.DB_OPERATION_DELETE_COLLECTION,
- 'astra.insert': SemanticConvetion.DB_OPERATION_INSERT,
- 'astra.update': SemanticConvetion.DB_OPERATION_UPDATE,
- 'astra.find': SemanticConvetion.DB_OPERATION_SELECT,
- 'astra.find_one_and_update': SemanticConvetion.DB_OPERATION_REPLACE,
- 'astra.replace_one': SemanticConvetion.DB_OPERATION_REPLACE,
- 'astra.delete': SemanticConvetion.DB_OPERATION_DELETE,
- 'astra.find_one_and_delete': SemanticConvetion.DB_OPERATION_FIND_AND_DELETE
+ 'astra.create_collection': SemanticConvention.DB_OPERATION_CREATE_COLLECTION,
+ 'astra.drop_collection': SemanticConvention.DB_OPERATION_DELETE_COLLECTION,
+ 'astra.insert': SemanticConvention.DB_OPERATION_INSERT,
+ 'astra.update': SemanticConvention.DB_OPERATION_UPDATE,
+ 'astra.find': SemanticConvention.DB_OPERATION_SELECT,
+ 'astra.find_one_and_update': SemanticConvention.DB_OPERATION_REPLACE,
+ 'astra.replace_one': SemanticConvention.DB_OPERATION_REPLACE,
+ 'astra.delete': SemanticConvention.DB_OPERATION_DELETE,
+ 'astra.find_one_and_delete': SemanticConvention.DB_OPERATION_FIND_AND_DELETE
  }

  def process_db_operations(response, span, start_time, gen_ai_endpoint,
@@ -40,41 +40,41 @@ def process_db_operations(response, span, start_time, gen_ai_endpoint,

  try:
  span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
- span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
- span.set_attribute(SemanticConvetion.DB_SYSTEM_NAME, SemanticConvetion.DB_SYSTEM_ASTRA)
- span.set_attribute(SemanticConvetion.DB_CLIENT_OPERATION_DURATION, end_time - start_time)
- span.set_attribute(SemanticConvetion.SERVER_ADDRESS, server_address)
- span.set_attribute(SemanticConvetion.SERVER_PORT, server_port)
+ span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB)
+ span.set_attribute(SemanticConvention.DB_SYSTEM_NAME, SemanticConvention.DB_SYSTEM_ASTRA)
+ span.set_attribute(SemanticConvention.DB_CLIENT_OPERATION_DURATION, end_time - start_time)
+ span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+ span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
  span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
  span.set_attribute(SERVICE_NAME, application_name)
- span.set_attribute(SemanticConvetion.DB_OPERATION_NAME, db_operation)
- span.set_attribute(SemanticConvetion.DB_COLLECTION_NAME, collection_name)
- span.set_attribute(SemanticConvetion.DB_SDK_VERSION, version)
+ span.set_attribute(SemanticConvention.DB_OPERATION_NAME, db_operation)
+ span.set_attribute(SemanticConvention.DB_COLLECTION_NAME, collection_name)
+ span.set_attribute(SemanticConvention.DB_SDK_VERSION, version)

- if db_operation == SemanticConvetion.DB_OPERATION_CREATE_COLLECTION:
- span.set_attribute(SemanticConvetion.DB_NAMESPACE, response.keyspace)
- span.set_attribute(SemanticConvetion.DB_COLLECTION_NAME, response.name)
- span.set_attribute(SemanticConvetion.DB_INDEX_DIMENSION, kwargs.get('dimension', ''))
- span.set_attribute(SemanticConvetion.DB_INDEX_METRIC, str(kwargs.get('metric', '')))
+ if db_operation == SemanticConvention.DB_OPERATION_CREATE_COLLECTION:
+ span.set_attribute(SemanticConvention.DB_NAMESPACE, response.keyspace)
+ span.set_attribute(SemanticConvention.DB_COLLECTION_NAME, response.name)
+ span.set_attribute(SemanticConvention.DB_INDEX_DIMENSION, kwargs.get('dimension', ''))
+ span.set_attribute(SemanticConvention.DB_INDEX_METRIC, str(kwargs.get('metric', '')))

- if db_operation == SemanticConvetion.DB_OPERATION_INSERT:
- span.set_attribute(SemanticConvetion.DB_DOCUMENTS_COUNT, object_count(args[0]))
- span.set_attribute(SemanticConvetion.DB_QUERY_TEXT, str(args[0] or kwargs.get('documents', {})))
+ if db_operation == SemanticConvention.DB_OPERATION_INSERT:
+ span.set_attribute(SemanticConvention.DB_DOCUMENTS_COUNT, object_count(args[0]))
+ span.set_attribute(SemanticConvention.DB_QUERY_TEXT, str(args[0] or kwargs.get('documents', {})))

- elif db_operation == SemanticConvetion.DB_OPERATION_UPDATE:
- span.set_attribute(SemanticConvetion.DB_RESPONSE_RETURNED_ROWS, response.update_info.get('nModified', 0))
- span.set_attribute(SemanticConvetion.DB_QUERY_TEXT, str(args[1] or kwargs.get('update', {})))
+ elif db_operation == SemanticConvention.DB_OPERATION_UPDATE:
+ span.set_attribute(SemanticConvention.DB_RESPONSE_RETURNED_ROWS, response.update_info.get('nModified', 0))
+ span.set_attribute(SemanticConvention.DB_QUERY_TEXT, str(args[1] or kwargs.get('update', {})))

- elif db_operation == SemanticConvetion.DB_OPERATION_DELETE:
- span.set_attribute(SemanticConvetion.DB_RESPONSE_RETURNED_ROWS, response.deleted_count)
- span.set_attribute(SemanticConvetion.DB_QUERY_TEXT, str(args[0] or kwargs.get('filter', {})))
+ elif db_operation == SemanticConvention.DB_OPERATION_DELETE:
+ span.set_attribute(SemanticConvention.DB_RESPONSE_RETURNED_ROWS, response.deleted_count)
+ span.set_attribute(SemanticConvention.DB_QUERY_TEXT, str(args[0] or kwargs.get('filter', {})))

  elif db_operation in [
- SemanticConvetion.DB_OPERATION_SELECT,
- SemanticConvetion.DB_OPERATION_FIND_AND_DELETE,
- SemanticConvetion.DB_OPERATION_REPLACE
+ SemanticConvention.DB_OPERATION_SELECT,
+ SemanticConvention.DB_OPERATION_FIND_AND_DELETE,
+ SemanticConvention.DB_OPERATION_REPLACE
  ]:
- span.set_attribute(SemanticConvetion.DB_QUERY_TEXT, str(args or kwargs.get('filter', {})))
+ span.set_attribute(SemanticConvention.DB_QUERY_TEXT, str(args or kwargs.get('filter', {})))

  span.set_status(Status(StatusCode.OK))

@@ -82,10 +82,10 @@ def process_db_operations(response, span, start_time, gen_ai_endpoint,
  attributes = {
  TELEMETRY_SDK_NAME: 'openlit',
  SERVICE_NAME: application_name,
- SemanticConvetion.DB_SYSTEM_NAME: SemanticConvetion.DB_SYSTEM_ASTRA,
+ SemanticConvention.DB_SYSTEM_NAME: SemanticConvention.DB_SYSTEM_ASTRA,
  DEPLOYMENT_ENVIRONMENT: environment,
- SemanticConvetion.GEN_AI_OPERATION: SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
- SemanticConvetion.DB_OPERATION_NAME: db_operation
+ SemanticConvention.GEN_AI_OPERATION: SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB,
+ SemanticConvention.DB_OPERATION_NAME: db_operation
  }

  metrics['db_requests'].add(1, attributes)
@@ -14,7 +14,7 @@ from openlit.instrumentation.azure_ai_inference.utils import (
  process_chat_response,
  process_streaming_chat_response,
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  # Initialize logger for logging potential issues and operations
  logger = logging.getLogger(__name__)
@@ -43,10 +43,10 @@ def async_complete(version, environment, application_name,
  self.__wrapped__ = wrapped
  self._span = span
  self._span_name = span_name
- self._llmresponse = ""
- self._response_id = ""
- self._response_model = ""
- self._finish_reason = ""
+ self._llmresponse = ''
+ self._response_id = ''
+ self._response_model = ''
+ self._finish_reason = ''
  self._input_tokens = 0
  self._output_tokens = 0

@@ -96,7 +96,7 @@ def async_complete(version, environment, application_name,

  except Exception as e:
  handle_exception(self._span, e)
- logger.error("Error in trace creation: %s", e)
+ logger.error('Error in trace creation: %s', e)
  raise

  async def wrapper(wrapped, instance, args, kwargs):
@@ -104,11 +104,11 @@ def async_complete(version, environment, application_name,
  Wraps the GenAI function call.
  """

- streaming = kwargs.get("stream", False)
- server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
- request_model = kwargs.get("model", "gpt-4o")
+ streaming = kwargs.get('stream', False)
+ server_address, server_port = set_server_address_and_port(instance, 'models.github.ai', 443)
+ request_model = kwargs.get('model', 'gpt-4o')

- span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'

  # pylint: disable=no-else-return
  if streaming:
@@ -14,7 +14,7 @@ from openlit.instrumentation.azure_ai_inference.utils import (
  process_chat_response,
  process_streaming_chat_response,
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  # Initialize logger for logging potential issues and operations
  logger = logging.getLogger(__name__)
@@ -43,10 +43,10 @@ def complete(version, environment, application_name,
  self.__wrapped__ = wrapped
  self._span = span
  self._span_name = span_name
- self._llmresponse = ""
- self._response_id = ""
- self._response_model = ""
- self._finish_reason = ""
+ self._llmresponse = ''
+ self._response_id = ''
+ self._response_model = ''
+ self._finish_reason = ''
  self._input_tokens = 0
  self._output_tokens = 0

@@ -96,7 +96,7 @@ def complete(version, environment, application_name,

  except Exception as e:
  handle_exception(self._span, e)
- logger.error("Error in trace creation: %s", e)
+ logger.error('Error in trace creation: %s', e)
  raise

  def wrapper(wrapped, instance, args, kwargs):
@@ -104,11 +104,11 @@ def complete(version, environment, application_name,
  Wraps the GenAI function call.
  """

- streaming = kwargs.get("stream", False)
- server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
- request_model = kwargs.get("model", "gpt-4o")
+ streaming = kwargs.get('stream', False)
+ server_address, server_port = set_server_address_and_port(instance, 'models.github.ai', 443)
+ request_model = kwargs.get('model', 'gpt-4o')

- span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'

  # pylint: disable=no-else-return
  if streaming:
@@ -16,7 +16,7 @@ from openlit.__helpers import (
  otel_event,
  concatenate_all_contents
  )
- from openlit.semcov import SemanticConvetion
+ from openlit.semcov import SemanticConvention

  def process_chunk(self, chunk):
  """
@@ -66,51 +66,51 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric

  # Set Span attributes (OTel Semconv)
  scope._span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
- scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_AZURE_AI_INFERENCE)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
- scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get('max_tokens', -1))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop', []))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
- scope._kwargs.get("frequency_penalty", 0.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
- scope._kwargs.get("presence_penalty", 0.0))
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
- scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
-
- scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+ scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+ scope._span.set_attribute(SemanticConvention.SERVER_PORT, scope._server_port)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get('max_tokens', -1))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop', []))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+ scope._kwargs.get('frequency_penalty', 0.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
+ scope._kwargs.get('presence_penalty', 0.0))
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+ scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, scope._server_address)
+
+ scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
  'text' if isinstance(scope._llmresponse, str) else 'json')

  scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
  scope._span.set_attribute(SERVICE_NAME, application_name)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
- scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+ scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

  # To be removed one the change to log events (from span events) is complete
  prompt = concatenate_all_contents(formatted_messages)
  if capture_message_content:
  scope._span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+ SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
  },
  )
  scope._span.add_event(
- name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+ name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
  attributes={
- SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+ SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
  },
  )

@@ -127,9 +127,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  for role in ['user', 'system', 'assistant', 'tool']:
  if formatted_messages.get(role, {}).get('content', ''):
  event = otel_event(
- name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+ name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
  },
  body = {
  # pylint: disable=line-too-long
@@ -154,9 +154,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  event_provider.emit(event)

  choice_event = otel_event(
- name=SemanticConvetion.GEN_AI_CHOICE,
+ name=SemanticConvention.GEN_AI_CHOICE,
  attributes={
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
  },
  body=choice_event_body
  )
@@ -168,8 +168,8 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
  metrics_attributes = create_metrics_attributes(
  service_name=application_name,
  deployment_environment=environment,
- operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
- system=SemanticConvetion.GEN_AI_SYSTEM_AZURE_AI_INFERENCE,
+ operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+ system=SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE,
  request_model=request_model,
  server_address=scope._server_address,
  server_port=scope._server_port,
@@ -22,6 +22,7 @@ class BedrockInstrumentor(BaseInstrumentor):
  application_name = kwargs.get("application_name", "default_application")
  environment = kwargs.get("environment", "default_environment")
  tracer = kwargs.get("tracer")
+ event_provider = kwargs.get('event_provider')
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
  capture_message_content = kwargs.get("capture_message_content", False)
@@ -33,7 +34,7 @@ class BedrockInstrumentor(BaseInstrumentor):
  "botocore.client",
  "ClientCreator.create_client",
  converse(version, environment, application_name,
- tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+ tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):