openlit 1.34.7__py3-none-any.whl → 1.34.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openlit/__helpers.py CHANGED
@@ -333,3 +333,49 @@ def format_and_concatenate(messages):
         for message_data in formatted_messages.values()
         if message_data['content']
     )
+
+def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
+    """
+    Set common span attributes for both chat and RAG operations.
+    """
+
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
+    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+
+def record_completion_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time, cost,
+    input_tokens, output_tokens, tbt, ttft):
+    """
+    Record completion metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_server_tbt"].record(tbt, attributes)
+    metrics["genai_server_ttft"].record(ttft, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
openlit/instrumentation/assemblyai/__init__.py CHANGED
@@ -5,39 +5,35 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.assemblyai.assemblyai import (
-    transcribe
-)
+from openlit.instrumentation.assemblyai.assemblyai import transcribe
 
-_instruments = ('assemblyai >= 0.35.1',)
+_instruments = ("assemblyai >= 0.35.1",)
 
 class AssemblyAIInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for AssemblyAI's client library.
+    An instrumentor for AssemblyAI client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        application_name = kwargs.get('application_name', 'default')
-        environment = kwargs.get('environment', 'default')
-        tracer = kwargs.get('tracer')
-        event_provider = kwargs.get('event_provider')
-        metrics = kwargs.get('metrics_dict')
-        pricing_info = kwargs.get('pricing_info', {})
-        capture_message_content = kwargs.get('capture_message_content', False)
-        disable_metrics = kwargs.get('disable_metrics')
-        version = importlib.metadata.version('assemblyai')
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("assemblyai")
 
         # sync transcribe
         wrap_function_wrapper(
-            'assemblyai.transcriber',
-            'Transcriber.transcribe',
+            "assemblyai.transcriber",
+            "Transcriber.transcribe",
             transcribe(version, environment, application_name,
-                tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
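
For orientation, a rough sketch of wiring the updated AssemblyAIInstrumentor by hand; openlit.init() normally does this internally, and the event_provider kwarg is gone in 1.34.10. The tracer setup and kwarg values here are assumptions for illustration, not taken from the package.

    from opentelemetry import trace
    from openlit.instrumentation.assemblyai import AssemblyAIInstrumentor

    AssemblyAIInstrumentor().instrument(
        application_name="my-app",
        environment="production",
        tracer=trace.get_tracer("openlit"),
        pricing_info={},
        capture_message_content=True,
        metrics_dict=None,      # openlit normally passes its own metrics dict here
        disable_metrics=True,   # keep metrics off since no metrics_dict is supplied
    )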
openlit/instrumentation/assemblyai/assemblyai.py CHANGED
@@ -1,150 +1,59 @@
 """
-Module for monitoring Assembly AI API calls.
+Module for monitoring AssemblyAI API calls.
 """
 
 import logging
 import time
-from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
-from openlit.__helpers import (
-    get_audio_model_cost,
-    handle_exception,
-    create_metrics_attributes,
-    set_server_address_and_port,
-    otel_event
-)
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import handle_exception, set_server_address_and_port
+from openlit.instrumentation.assemblyai.utils import process_audio_response
 from openlit.semcov import SemanticConvention
 
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
 def transcribe(version, environment, application_name,
-    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for GenAI function call
+    Generates a telemetry wrapper for AssemblyAI transcribe function call
    """
 
-
    def wrapper(wrapped, instance, args, kwargs):
        """
-        Wraps the GenAI function call.
+        Wraps the AssemblyAI transcribe function call.
        """
 
-        server_address, server_port = set_server_address_and_port(instance, 'api.assemblyai.com', 443)
-        request_model = kwargs.get('speech_model', 'best')
+        server_address, server_port = set_server_address_and_port(instance, "api.assemblyai.com", 443)
+        request_model = kwargs.get("speech_model", "best")
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}"
 
-        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
-            response = wrapped(*args, **kwargs)
-            end_time = time.time()
 
            try:
-                # Calculate cost of the operation
-                cost = get_audio_model_cost(request_model,
-                    pricing_info, None, response.audio_duration)
-
-                # Set Span attributes (OTel Semconv)
-                span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
-                span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
-                    SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO)
-                span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
-                    SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI)
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                    request_model)
-                span.set_attribute(SemanticConvention.SERVER_ADDRESS,
-                    server_address)
-                span.set_attribute(SemanticConvention.SERVER_PORT,
-                    server_port)
-                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,
-                    request_model)
-                span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                    'text')
-
-                # Set Span attributes (Extras)
-                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                    environment)
-                span.set_attribute(SERVICE_NAME,
-                    application_name)
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_DURATION,
-                    response.audio_duration)
-                span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST,
-                    cost)
-                span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION,
-                    version)
-
-                # To be removed one the change to log events (from span events) is complete
-                if capture_message_content:
-                    span.add_event(
-                        name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
-                        attributes={
-                            SemanticConvention.GEN_AI_CONTENT_PROMPT: response.audio_url,
-                        },
-                    )
-                    span.add_event(
-                        name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
-                        attributes={
-                            SemanticConvention.GEN_AI_CONTENT_COMPLETION: response.text,
-                        },
-                    )
-
-                input_event = otel_event(
-                    name=SemanticConvention.GEN_AI_USER_MESSAGE,
-                    attributes={
-                        SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI
-                    },
-                    body={
-                        **({'content': response.audio_url} if capture_message_content else {}),
-                        'role': 'user'
-                    }
+                response = wrapped(*args, **kwargs)
+
+                response = process_audio_response(
+                    response=response,
+                    gen_ai_endpoint="assemblyai.transcribe",
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
                )
-                event_provider.emit(input_event)
-
-                output_event = otel_event(
-                    name=SemanticConvention.GEN_AI_CHOICE,
-                    attributes={
-                        SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI
-                    },
-                    body={
-                        'finish_reason': 'stop',
-                        'index': 0,
-                        'message': {
-                            **({'content': response.text} if capture_message_content else {}),
-                            'role': 'assistant'
-                        }
-                    }
-                )
-                event_provider.emit(output_event)
-
-                span.set_status(Status(StatusCode.OK))
-
-                if disable_metrics is False:
-                    attributes = create_metrics_attributes(
-                        service_name=application_name,
-                        deployment_environment=environment,
-                        operation=SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
-                        system=SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI,
-                        request_model=request_model,
-                        server_address=server_address,
-                        server_port=server_port,
-                        response_model=request_model,
-                    )
-
-                    metrics['genai_client_operation_duration'].record(
-                        end_time - start_time, attributes
-                    )
-                    metrics['genai_requests'].add(1, attributes)
-                    metrics['genai_cost'].record(cost, attributes)
-
-                # Return original response
-                return response
 
            except Exception as e:
                handle_exception(span, e)
-                logger.error('Error in trace creation: %s', e)
 
-            # Return original response
-            return response
+            return response
 
    return wrapper
openlit/instrumentation/assemblyai/utils.py ADDED
@@ -0,0 +1,142 @@
+"""
+AssemblyAI OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    get_audio_model_cost,
+    create_metrics_attributes,
+)
+from openlit.semcov import SemanticConvention
+
+def format_audio_url(audio_url):
+    """
+    Process audio URL input to extract content.
+    """
+    return str(audio_url) if audio_url else ""
+
+def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
+    """
+    Set common span attributes for both chat and RAG operations.
+    """
+
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
+    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+
+def record_audio_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time, cost):
+    """
+    Record audio metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_cost"].record(cost, attributes)
+
+def common_audio_logic(scope, gen_ai_endpoint, pricing_info, environment, application_name,
+    metrics, capture_message_content, disable_metrics, version):
+    """
+    Process audio transcription request and generate Telemetry
+    """
+
+    prompt = scope._response.audio_url
+    request_model = scope._kwargs.get("speech_model", "best")
+    is_stream = False
+
+    # Calculate cost based on audio duration
+    cost = get_audio_model_cost(request_model, pricing_info, prompt, scope._response.audio_duration)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO, SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI,
+        scope._server_address, scope._server_port, request_model, request_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Response parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text")
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response.id)
+
+    # Span Attributes for Cost
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Audio-specific span attributes
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_DURATION, scope._response.audio_duration)
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._response.text)
+
+        # To be removed once the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._response.text,
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_audio_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
+            SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI, scope._server_address, scope._server_port,
+            request_model, request_model, environment, application_name, scope._start_time,
+            scope._end_time, cost)
+
+def process_audio_response(response, gen_ai_endpoint, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process audio transcription request and generate Telemetry
+    """
+
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+    scope._response = response
+
+    # Initialize streaming and timing values for AssemblyAI transcription
+    scope._response_model = kwargs.get("speech_model", "best")
+    scope._tbt = 0.0
+    scope._ttft = scope._end_time - scope._start_time
+
+    common_audio_logic(scope, gen_ai_endpoint, pricing_info, environment, application_name,
+        metrics, capture_message_content, disable_metrics, version)
+
+    return response
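
A rough sketch (not from the package) of what process_audio_response expects at the call site: a Transcript-like object exposing id, text, audio_url and audio_duration, plus an active span. The stub object, tracer setup and empty pricing_info are assumptions for illustration.

    import time
    from types import SimpleNamespace
    from opentelemetry import trace
    from openlit.instrumentation.assemblyai.utils import process_audio_response

    tracer = trace.get_tracer(__name__)

    # Stub with the attributes the helper reads: id, text, audio_url, audio_duration.
    fake_transcript = SimpleNamespace(
        id="transcript-123",
        text="hello world",
        audio_url="https://example.com/audio.wav",
        audio_duration=12.5,
    )

    with tracer.start_as_current_span("audio best") as span:
        process_audio_response(
            response=fake_transcript,
            gen_ai_endpoint="assemblyai.transcribe",
            pricing_info={},            # empty pricing; the cost helper is expected to fall back to 0
            server_port=443,
            server_address="api.assemblyai.com",
            environment="production",
            application_name="my-app",
            metrics=None,
            start_time=time.time(),
            span=span,
            capture_message_content=False,
            disable_metrics=True,       # no metrics dict supplied, so keep metrics off
            version="1.34.10",
            speech_model="best",        # lands in scope._kwargs and becomes the response model
        )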
openlit/instrumentation/elevenlabs/__init__.py CHANGED
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of ElevenLabs Functions"""
 
 from typing import Collection
@@ -6,18 +5,14 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.elevenlabs.elevenlabs import (
-    generate
-)
-from openlit.instrumentation.elevenlabs.async_elevenlabs import (
-    async_generate
-)
+from openlit.instrumentation.elevenlabs.elevenlabs import generate
+from openlit.instrumentation.elevenlabs.async_elevenlabs import async_generate
 
 _instruments = ("elevenlabs >= 1.4.0",)
 
 class ElevenLabsInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for ElevenLabs's client library.
+    An instrumentor for ElevenLabs client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
@@ -33,14 +28,6 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("elevenlabs")
 
-        # sync generate
-        wrap_function_wrapper(
-            "elevenlabs.client",
-            "ElevenLabs.generate",
-            generate("elevenlabs.generate", version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
-        )
-
         # sync text_to_speech.convert
         wrap_function_wrapper(
             "elevenlabs.text_to_speech.client",
@@ -49,22 +36,13 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
-        # async generate
-        wrap_function_wrapper(
-            "elevenlabs.client",
-            "AsyncElevenLabs.generate",
-            async_generate("elevenlabs.generate", version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
-        )
-
-        # sync text_to_speech.convert
+        # async text_to_speech.convert
         wrap_function_wrapper(
             "elevenlabs.text_to_speech.client",
             "AsyncTextToSpeechClient.convert",
-            generate("elevenlabs.text_to_speech", version, environment, application_name,
+            async_generate("elevenlabs.text_to_speech", version, environment, application_name,
                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
openlit/instrumentation/elevenlabs/async_elevenlabs.py CHANGED
@@ -1,145 +1,55 @@
 """
-Module for monitoring Ollama API calls.
+Module for monitoring ElevenLabs API calls.
 """
 
-import logging
 import time
-from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
-from openlit.__helpers import (
-    get_audio_model_cost,
-    handle_exception,
-    create_metrics_attributes,
-)
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import handle_exception
+from openlit.instrumentation.elevenlabs.utils import process_audio_response
 from openlit.semcov import SemanticConvention
 
-# Initialize logger for logging potential issues and operations
-logger = logging.getLogger(__name__)
-
 def async_generate(gen_ai_endpoint, version, environment, application_name,
-                   tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for creating speech audio to collect metrics.
-
-    Args:
-        version: Version of the monitoring package.
-        environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the ElevenLabs API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of generating speech audio.
-        capture_message_content: Flag indicating whether to trace the input text and generated audio.
-
-    Returns:
-        A function that wraps the speech audio creation method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """
 
     async def wrapper(wrapped, instance, args, kwargs):
        """
-        Wraps the 'generate' API call to add telemetry.
-
-        This collects metrics such as execution time, cost, and handles errors
-        gracefully, adding details to the trace for observability.
-
-        Args:
-            wrapped: The original 'generate' method to be wrapped.
-            instance: The instance of the class where the original method is defined.
-            args: Positional arguments for the 'generate' method.
-            kwargs: Keyword arguments for the 'generate' method.
-
-        Returns:
-            The response from the original 'generate' method.
+        Wraps the GenAI function call.
        """
 
        server_address, server_port = "api.elevenlabs.io", 443
-        request_model = kwargs.get('model', kwargs.get('model_id', 'eleven_multilingual_v2'))
+        request_model = kwargs.get("model", kwargs.get("model_id", "eleven_multilingual_v2"))
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}"
 
-        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
-            response = await wrapped(*args, **kwargs)
-            end_time = time.time()
+            response = wrapped(*args, **kwargs)
 
            try:
-                # Calculate cost of the operation
-                cost = get_audio_model_cost(request_model,
-                    pricing_info, kwargs.get('text', ''))
-
-                # Set Span attributes
-                span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
-                span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
-                    SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO)
-                span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
-                    SemanticConvention.GEN_AI_SYSTEM_ASSEMBLYAI)
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                    request_model)
-                span.set_attribute(SemanticConvention.SERVER_ADDRESS,
-                    server_address)
-                span.set_attribute(SemanticConvention.SERVER_PORT,
-                    server_port)
-                span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,
-                    request_model)
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                    request_model)
-                span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                    'audio')
-
-                # Set Span attributes (Extras)
-                if gen_ai_endpoint == 'elevenlabs.generate':
-                    if isinstance(kwargs.get('voice', 'Rachel'), str):
-                        span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_VOICE,
-                            kwargs.get('voice', 'Rachel'))
-                else:
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_VOICE,
-                        kwargs.get('voice_id', ''))
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_RESPONSE_FORMAT,
-                    kwargs.get('output_format', 'mp3'))
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_SETTINGS,
-                    str(kwargs.get('voice_settings', '')))
-                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                    environment)
-                span.set_attribute(SERVICE_NAME,
-                    application_name)
-                span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST,
-                    cost)
-                span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION,
-                    version)
-                if capture_message_content:
-                    span.add_event(
-                        name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
-                        attributes={
-                            SemanticConvention.GEN_AI_CONTENT_PROMPT: str(kwargs.get('text', '')),
-                        },
-                    )
-
-                span.set_status(Status(StatusCode.OK))
-
-                if disable_metrics is False:
-                    attributes = create_metrics_attributes(
-                        service_name=application_name,
-                        deployment_environment=environment,
-                        operation=SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
-                        system=SemanticConvention.GEN_AI_SYSTEM_ELEVENLABS,
-                        request_model=request_model,
-                        server_address=server_address,
-                        server_port=server_port,
-                        response_model=request_model,
-                    )
-
-                    metrics['genai_client_operation_duration'].record(
-                        end_time - start_time, attributes
-                    )
-                    metrics['genai_requests'].add(1, attributes)
-                    metrics['genai_cost'].record(cost, attributes)
-
-                # Return original response
-                return response
+                response = process_audio_response(
+                    response=response,
+                    gen_ai_endpoint=gen_ai_endpoint,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    args=args,
+                    kwargs=kwargs,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version
+                )
 
            except Exception as e:
                handle_exception(span, e)
-                logger.error('Error in trace creation: %s', e)
 
-            # Return original response
-            return response
+            return response
 
    return wrapper