openlit 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,66 @@
1
+ # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
2
+ """Initializer of Auto Instrumentation of Pinecone Functions"""
3
+ from typing import Collection
4
+ import importlib.metadata
5
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
6
+ from wrapt import wrap_function_wrapper
7
+
8
+ from openlit.instrumentation.pinecone.pinecone import general_wrap
9
+
10
+ _instruments = ("pinecone-client >= 2.2.0",)
11
+
12
class PineconeInstrumentor(BaseInstrumentor):
    """An instrumentor for Pinecone's client library."""

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package spec(s) that must be installed for this instrumentor."""
        return _instruments

    def _instrument(self, **kwargs):
        """Wrap the Pinecone client entry points with tracing wrappers.

        Configuration (tracer, metrics dict, pricing info, content/metrics
        flags) is read from the kwargs supplied by the openlit initializer.
        """
        application_name = kwargs.get("application_name")
        environment = kwargs.get("environment")
        tracer = kwargs.get("tracer")
        metrics = kwargs.get("metrics_dict")
        pricing_info = kwargs.get("pricing_info")
        trace_content = kwargs.get("trace_content")
        disable_metrics = kwargs.get("disable_metrics")
        version = importlib.metadata.version("pinecone-client")

        # (module path, attribute path, span/endpoint name) for each wrapped
        # Pinecone operation — consolidated from five duplicated calls.
        targets = (
            ("pinecone.control.pinecone", "Pinecone.create_index", "pinecone.create_index"),
            ("pinecone.data.index", "Index.upsert", "pinecone.upsert"),
            ("pinecone.data.index", "Index.query", "pinecone.query"),
            ("pinecone.data.index", "Index.update", "pinecone.update"),
            ("pinecone.data.index", "Index.delete", "pinecone.delete"),
        )
        for module, attribute, endpoint in targets:
            wrap_function_wrapper(
                module,
                attribute,
                general_wrap(endpoint, version, environment, application_name,
                             tracer, pricing_info, trace_content, metrics,
                             disable_metrics),
            )

    def _uninstrument(self, **kwargs):
        """No-op: wrappers are not removed yet.

        BUG FIX: this was previously decorated with ``@staticmethod`` while
        still declaring ``self``; ``BaseInstrumentor`` invokes
        ``self._uninstrument(**kwargs)``, which on a staticmethod raises
        TypeError (missing positional argument 'self'). A plain instance
        method is the correct shape.
        """
@@ -0,0 +1,173 @@
1
+ # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument
2
+ """
3
+ Module for monitoring Pinecone.
4
+ """
5
+
6
+ import logging
7
+ from opentelemetry.trace import SpanKind, Status, StatusCode
8
+ from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
9
+ from openlit.__helpers import handle_exception
10
+ from openlit.semcov import SemanticConvetion
11
+
12
+ # Initialize logger for logging potential issues and operations
13
+ logger = logging.getLogger(__name__)
14
+
15
def object_count(obj):
    """Return ``len(obj)`` for a truthy object, otherwise ``None``.

    Empty containers, empty strings, and ``None`` all yield ``None`` so the
    caller can distinguish "nothing to count" from a real length.
    """
    return len(obj) if obj else None
24
+
25
def general_wrap(gen_ai_endpoint, version, environment, application_name,
                 tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Wraps a Pinecone operation to trace and log its execution metrics.

    Parameters:
    - gen_ai_endpoint (str): The specific Pinecone operation being monitored,
      e.g. 'pinecone.create_index', 'pinecone.query', 'pinecone.upsert'.
    - version (str): The version of the pinecone-client package.
    - environment (str): The deployment environment, e.g. 'production'.
    - application_name (str): The name of the application performing the operation.
    - tracer (opentelemetry.trace.Tracer): Tracer used to create spans.
    - pricing_info (dict): Pricing information, not used for vector DB calls.
    - trace_content (bool): Whether response content should be traced (unused here).
    - metrics (dict): OpenTelemetry metric instruments keyed by name.
    - disable_metrics (bool): When False, a db request counter is recorded.

    Returns:
    - function: A wrapt-compatible wrapper that traces the Pinecone operation.
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Executes the wrapped Pinecone operation, adding tracing and metrics.

        Parameters:
        - wrapped (Callable): The Pinecone operation to be executed.
        - instance (object): The instance the operation is bound to (unused).
        - args (tuple): Positional arguments for the Pinecone operation.
        - kwargs (dict): Keyword arguments for the Pinecone operation.

        Returns:
        - Any: The result of executing the wrapped Pinecone operation.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = wrapped(*args, **kwargs)

            try:
                # BUG FIX: `db_operation` was previously assigned only inside
                # the endpoint branches, so an unrecognized endpoint raised a
                # NameError in the metrics block below (swallowed by the broad
                # except, silently dropping metrics). Default it up front.
                db_operation = ""

                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                   gen_ai_endpoint)
                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                   environment)
                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                   application_name)
                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                   SemanticConvetion.GEN_AI_TYPE_VECTORDB)
                span.set_attribute(SemanticConvetion.DB_SYSTEM,
                                   SemanticConvetion.DB_SYSTEM_PINECONE)

                if gen_ai_endpoint == "pinecone.create_index":
                    db_operation = SemanticConvetion.DB_OPERATION_CREATE_INDEX
                    span.set_attribute(SemanticConvetion.DB_OPERATION,
                                       db_operation)
                    span.set_attribute(SemanticConvetion.DB_INDEX_NAME,
                                       kwargs.get("name", ""))
                    # NOTE(review): pinecone-client's create_index takes the
                    # singular kwarg `dimension`; the original read
                    # "dimensions" and so always recorded "". Kept as a
                    # backward-compatible fallback — confirm against the
                    # pinned client version.
                    span.set_attribute(SemanticConvetion.DB_INDEX_DIMENSION,
                                       kwargs.get("dimension",
                                                  kwargs.get("dimensions", "")))
                    span.set_attribute(SemanticConvetion.DB_INDEX_METRIC,
                                       kwargs.get("metric", ""))
                    span.set_attribute(SemanticConvetion.DB_INDEX_SPEC,
                                       str(kwargs.get("spec", "")))

                elif gen_ai_endpoint == "pinecone.query":
                    db_operation = SemanticConvetion.DB_OPERATION_QUERY
                    span.set_attribute(SemanticConvetion.DB_OPERATION,
                                       db_operation)
                    span.set_attribute(SemanticConvetion.DB_STATEMENT,
                                       str(kwargs.get("vector")))
                    span.set_attribute(SemanticConvetion.DB_N_RESULTS,
                                       kwargs.get("top_k", ""))
                    span.set_attribute(SemanticConvetion.DB_FILTER,
                                       str(kwargs.get("filter", "")))
                    span.set_attribute(SemanticConvetion.DB_NAMESPACE,
                                       str(kwargs.get("namespace", "")))

                elif gen_ai_endpoint == "pinecone.update":
                    db_operation = SemanticConvetion.DB_OPERATION_UPDATE
                    span.set_attribute(SemanticConvetion.DB_OPERATION,
                                       db_operation)
                    span.set_attribute(SemanticConvetion.DB_UPDATE_ID,
                                       kwargs.get("id", ""))
                    span.set_attribute(SemanticConvetion.DB_UPDATE_VALUES,
                                       str(kwargs.get("values", [])))
                    span.set_attribute(SemanticConvetion.DB_NAMESPACE,
                                       str(kwargs.get("namespace", "")))
                    span.set_attribute(SemanticConvetion.DB_UPDATE_METADATA,
                                       str(kwargs.get("set_metadata", "")))

                elif gen_ai_endpoint == "pinecone.upsert":
                    db_operation = SemanticConvetion.DB_OPERATION_UPSERT
                    span.set_attribute(SemanticConvetion.DB_OPERATION,
                                       db_operation)
                    span.set_attribute(SemanticConvetion.DB_VECTOR_COUNT,
                                       object_count(kwargs.get("vectors")))

                elif gen_ai_endpoint == "pinecone.delete":
                    db_operation = SemanticConvetion.DB_OPERATION_DELETE
                    span.set_attribute(SemanticConvetion.DB_OPERATION,
                                       db_operation)
                    span.set_attribute(SemanticConvetion.DB_ID_COUNT,
                                       object_count(kwargs.get("ids")))
                    span.set_attribute(SemanticConvetion.DB_FILTER,
                                       str(kwargs.get("filter", "")))
                    span.set_attribute(SemanticConvetion.DB_DELETE_ALL,
                                       kwargs.get("delete_all", False))
                    span.set_attribute(SemanticConvetion.DB_NAMESPACE,
                                       kwargs.get("namespace", ""))

                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME: "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME: application_name,
                        SemanticConvetion.DB_SYSTEM: SemanticConvetion.DB_SYSTEM_PINECONE,
                        SemanticConvetion.GEN_AI_ENVIRONMENT: environment,
                        SemanticConvetion.GEN_AI_TYPE: SemanticConvetion.GEN_AI_TYPE_VECTORDB,
                        SemanticConvetion.DB_OPERATION: db_operation,
                    }
                    metrics["db_requests"].add(1, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
@@ -0,0 +1,37 @@
1
+ # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
2
+ """Initializer of Auto Instrumentation of HuggingFace Transformer Functions"""
3
+ from typing import Collection
4
+ import importlib.metadata
5
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
6
+ from wrapt import wrap_function_wrapper
7
+
8
+ from openlit.instrumentation.transformers.transformers import text_wrap
9
+
10
+ _instruments = ("transformers >= 4.39.3",)
11
+
12
class TransformersInstrumentor(BaseInstrumentor):
    """An instrumentor for HuggingFace Transformer Functions."""

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package spec(s) that must be installed for this instrumentor."""
        return _instruments

    def _instrument(self, **kwargs):
        """Wrap ``TextGenerationPipeline.__call__`` with a tracing wrapper.

        Configuration (tracer, metrics dict, pricing info, content/metrics
        flags) is read from the kwargs supplied by the openlit initializer.
        """
        application_name = kwargs.get("application_name")
        environment = kwargs.get("environment")
        tracer = kwargs.get("tracer")
        metrics = kwargs.get("metrics_dict")
        pricing_info = kwargs.get("pricing_info")
        trace_content = kwargs.get("trace_content")
        disable_metrics = kwargs.get("disable_metrics")
        version = importlib.metadata.version("transformers")

        wrap_function_wrapper(
            "transformers.pipelines",
            "TextGenerationPipeline.__call__",
            text_wrap("huggingface.text_generation", version, environment,
                      application_name, tracer, pricing_info, trace_content,
                      metrics, disable_metrics),
        )

    def _uninstrument(self, **kwargs):
        """No-op: wrappers are not removed yet.

        BUG FIX: this was previously decorated with ``@staticmethod`` while
        still declaring ``self``; ``BaseInstrumentor`` invokes
        ``self._uninstrument(**kwargs)``, which on a staticmethod raises
        TypeError (missing positional argument 'self'). A plain instance
        method is the correct shape.
        """
@@ -0,0 +1,156 @@
1
+ # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument
2
+ """
3
+ Module for monitoring HuggingFace Transformers.
4
+ """
5
+
6
+ import logging
7
+ from opentelemetry.trace import SpanKind, Status, StatusCode
8
+ from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
9
+ from openlit.__helpers import handle_exception, general_tokens
10
+ from openlit.semcov import SemanticConvetion
11
+
12
+ # Initialize logger for logging potential issues and operations
13
+ logger = logging.getLogger(__name__)
14
+
15
def text_wrap(gen_ai_endpoint, version, environment, application_name,
              tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Creates a wrapper around a HuggingFace text-generation pipeline call to
    trace and log its execution metrics.

    Parameters:
    - gen_ai_endpoint (str): A descriptor or name for the endpoint being traced.
    - version (str): The version of the transformers package.
    - environment (str): The deployment environment (e.g., 'production').
    - application_name (str): Name of the application.
    - tracer (opentelemetry.trace.Tracer): The tracer object used for tracing.
    - pricing_info (dict): Pricing information (currently not used for local models).
    - trace_content (bool): Whether prompt/completion *text* is attached to spans.
    - metrics (dict): OpenTelemetry metric instruments keyed by name.
    - disable_metrics (bool): When False, request/token counters are recorded.

    Returns:
    - function: A wrapt-compatible wrapper with tracing and metrics.
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Executes the wrapped pipeline call, measures token usage, and records
        trace data using OpenTelemetry.

        Parameters:
        - wrapped (Callable): The original function that this wrapper executes.
        - instance (object): The pipeline instance the call is bound to; its
          `_forward_params` and `model.config.name_or_path` are read.
        - args (tuple): Positional arguments passed to the wrapped function.
        - kwargs (dict): Keyword arguments passed to the wrapped function.

        Returns:
        - The result of the wrapped function call.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = wrapped(*args, **kwargs)

            # pylint: disable=protected-access
            forward_params = instance._forward_params

            try:
                if args:
                    prompt = args[0]
                else:
                    # NOTE(review): pipelines are normally called with the
                    # `text_inputs` keyword; reading kwargs["args"] looks like
                    # a copy-paste artifact — confirm against the pipeline
                    # __call__ signature. Behavior preserved here.
                    prompt = kwargs.get("args", "")

                prompt_tokens = general_tokens(prompt)

                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                   gen_ai_endpoint)
                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                   SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE)
                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                   environment)
                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                   application_name)
                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                   SemanticConvetion.GEN_AI_TYPE_CHAT)
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                   instance.model.config.name_or_path)
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                   forward_params.get("temperature"))
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                   forward_params.get("top_p"))
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                   forward_params.get("max_length"))

                # BUG FIX: `trace_content` previously gated the prompt *token
                # count* while the prompt *text* was recorded unconditionally.
                # The flag documents itself as controlling content capture, so
                # gate the text and always record the usage counter.
                if trace_content:
                    span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
                                       prompt)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                   prompt_tokens)

                completion_tokens = 0
                for i, completion in enumerate(response):
                    # The first completion keeps the canonical attribute name;
                    # subsequent completions get an indexed attribute name
                    # (same effective naming as the original branch logic).
                    if i == 0:
                        attribute_name = SemanticConvetion.GEN_AI_CONTENT_COMPLETION
                    else:
                        attribute_name = f"gen_ai.content.completion.{i}"
                    if trace_content:
                        span.set_attribute(attribute_name,
                                           completion["generated_text"])
                    completion_tokens += general_tokens(completion["generated_text"])

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                   completion_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                   prompt_tokens + completion_tokens)
                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME: "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME: application_name,
                        SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE,
                        SemanticConvetion.GEN_AI_ENVIRONMENT: environment,
                        SemanticConvetion.GEN_AI_TYPE: SemanticConvetion.GEN_AI_TYPE_CHAT,
                        SemanticConvetion.GEN_AI_REQUEST_MODEL: instance.model.config.name_or_path,
                    }

                    metrics["genai_requests"].add(1, attributes)
                    metrics["genai_total_tokens"].add(
                        prompt_tokens + completion_tokens, attributes)
                    metrics["genai_completion_tokens"].add(
                        completion_tokens, attributes)
                    metrics["genai_prompt_tokens"].add(
                        prompt_tokens, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
@@ -0,0 +1,109 @@
1
+ # pylint: disable=duplicate-code, line-too-long
2
+ """
3
+ Setups up OpenTelemetry Meter
4
+ """
5
+ import os
6
+ from opentelemetry import metrics
7
+ from opentelemetry.sdk.metrics import MeterProvider
8
+ from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter
9
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
10
+ from opentelemetry.sdk.resources import Resource
11
+ from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
12
+
13
+ from openlit.semcov import SemanticConvetion
14
+
15
+ # Global flag to check if the meter provider initialization is complete.
16
+ METER_SET = False
17
+
18
def setup_meter(application_name, environment, meter, otlp_endpoint, otlp_headers):
    """
    Sets up OpenTelemetry metrics with a counter for total requests.

    Params:
        application_name (str): The name of the application for which metrics are being collected.
        environment (str): Deployment environment recorded on the resource.
        meter: Optional pre-configured meter; when given, provider setup is skipped.
        otlp_endpoint (str): The OTLP exporter endpoint for metrics.
        otlp_headers (dict): Headers for the OTLP request.

    Returns:
        A dictionary containing the created metric instruments, or None on failure.
    """

    # pylint: disable=global-statement
    global METER_SET

    try:
        # Initialize a meter provider exactly once, and only when the caller
        # did not supply their own meter.
        if meter is None and not METER_SET:
            # Resource identifying this service in exported metrics.
            resource = Resource(attributes={
                SERVICE_NAME: application_name,
                DEPLOYMENT_ENVIRONMENT: environment,
                TELEMETRY_SDK_NAME: "openlit",
            })

            # Propagate explicit settings through the standard OTel env vars,
            # but never clobber them with None.
            if otlp_endpoint is not None:
                os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = otlp_endpoint

            if otlp_headers is not None:
                # Accept either a ready-made header string or a dict that we
                # serialize to the comma-separated "k=v" form OTel expects.
                if isinstance(otlp_headers, dict):
                    serialized = ','.join(
                        f"{key}={value}" for key, value in otlp_headers.items())
                else:
                    serialized = otlp_headers
                os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = serialized

            # Export over OTLP when an endpoint is effectively configured;
            # otherwise fall back to console output.
            if os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT"):
                exporter = OTLPMetricExporter()
            else:
                exporter = ConsoleMetricExporter()

            reader = PeriodicExportingMetricReader(exporter)
            provider = MeterProvider(resource=resource, metric_readers=[reader])
            metrics.set_meter_provider(provider)
            meter = metrics.get_meter(__name__, version="0.1.0")
            METER_SET = True

        # Create the instrument set on whichever meter we ended up with.
        return {
            "genai_requests": meter.create_counter(
                name=SemanticConvetion.GEN_AI_REQUESTS,
                description="Number of requests to OpenAI",
                unit="1",
            ),
            "genai_prompt_tokens": meter.create_counter(
                name=SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                description="Number of prompt tokens processed.",
                unit="1",
            ),
            "genai_completion_tokens": meter.create_counter(
                name=SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                description="Number of completion tokens processed.",
                unit="1",
            ),
            "genai_total_tokens": meter.create_counter(
                name=SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                description="Number of total tokens processed.",
                unit="1",
            ),
            "genai_cost": meter.create_histogram(
                name=SemanticConvetion.GEN_AI_USAGE_COST,
                description="The distribution of OpenAI request costs.",
                unit="USD",
            ),
            "db_requests": meter.create_counter(
                name=SemanticConvetion.DB_REQUESTS,
                description="Number of requests to VectorDBs",
                unit="1",
            ),
        }

    # pylint: disable=bare-except
    except:
        # Deliberate best-effort: metric setup failures must not break the app.
        return None
@@ -0,0 +1,83 @@
1
+ # pylint: disable=duplicate-code, line-too-long
2
+ """
3
+ Setups up OpenTelemetry tracer
4
+ """
5
+
6
+ import os
7
+ from opentelemetry import trace
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
+ from opentelemetry.sdk.resources import Resource
10
+ from opentelemetry.sdk.trace import TracerProvider
11
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor
12
+ from opentelemetry.sdk.trace.export import ConsoleSpanExporter
13
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
14
+
15
+
16
+ # Global flag to check if the tracer provider initialization is complete.
17
+ TRACER_SET = False
18
+
19
def setup_tracing(application_name, environment, tracer, otlp_endpoint, otlp_headers, disable_batch):
    """
    Sets up tracing with OpenTelemetry. Initializes the tracer provider and configures the span processor and exporter.

    Params:
        application_name (str): The name of the application to be used in traces.
        environment (str): Deployment environment recorded on the resource.
        tracer (Tracer): Optional custom tracer. If provided, it is immediately returned and no setup is performed.
        otlp_endpoint (str): The OTLP exporter endpoint. Falls back to the OTEL_EXPORTER_OTLP_ENDPOINT environment variable if not specified.
        otlp_headers (dict): Headers for the OTLP request. Falls back to the OTEL_EXPORTER_OTLP_HEADERS environment variable if not specified.
        disable_batch (bool): Flag to disable the batch span processor in favor of a simpler processor for exporting.

    Returns:
        The provided custom tracer if not None; otherwise a configured tracer, or None on failure.
    """

    # Caller-supplied tracers bypass all provider configuration.
    if tracer is not None:
        return tracer

    # pylint: disable=global-statement
    global TRACER_SET

    try:
        # One-time provider initialization, guarded by the module-level flag.
        if not TRACER_SET:
            # Resource identifying this service in exported traces.
            resource = Resource(attributes={
                SERVICE_NAME: application_name,
                DEPLOYMENT_ENVIRONMENT: environment,
                TELEMETRY_SDK_NAME: "openlit",
            })
            trace.set_tracer_provider(TracerProvider(resource=resource))

            # Propagate explicit settings through the standard OTel env vars,
            # but never clobber them with None.
            if otlp_endpoint is not None:
                os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = otlp_endpoint

            if otlp_headers is not None:
                # Accept either a ready-made header string or a dict that we
                # serialize to the comma-separated "k=v" form OTel expects.
                if isinstance(otlp_headers, dict):
                    serialized = ','.join(
                        f"{key}={value}" for key, value in otlp_headers.items())
                else:
                    serialized = otlp_headers
                os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = serialized

            # Export over OTLP (batched unless disabled) when an endpoint is
            # effectively configured; otherwise dump spans to the console.
            if os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT"):
                otlp_exporter = OTLPSpanExporter()
                if disable_batch:
                    processor = SimpleSpanProcessor(otlp_exporter)
                else:
                    processor = BatchSpanProcessor(otlp_exporter)
            else:
                processor = SimpleSpanProcessor(ConsoleSpanExporter())

            trace.get_tracer_provider().add_span_processor(processor)
            TRACER_SET = True

        return trace.get_tracer(__name__)

    # pylint: disable=bare-except
    except:
        # Deliberate best-effort: tracing setup failures must not break the app.
        return None