openlit 1.30.0.tar.gz → 1.30.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openlit-1.30.0 → openlit-1.30.2}/PKG-INFO +1 -1
- {openlit-1.30.0 → openlit-1.30.2}/pyproject.toml +1 -1
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/__init__.py +2 -2
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/utils.py +5 -2
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/utils.py +5 -1
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -1
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -1
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/crewai/crewai.py +4 -4
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -1
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/litellm/async_litellm.py +6 -7
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/litellm/litellm.py +6 -7
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/otel/metrics.py +6 -3
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/otel/tracing.py +5 -2
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/semcov/__init__.py +3 -0
- {openlit-1.30.0 → openlit-1.30.2}/LICENSE +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/README.md +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/__helpers.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/all.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/bias_detection.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/hallucination.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/toxicity.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/all.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/prompt_injection.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/restrict_topic.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/sensitive_topic.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/chroma/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/chroma/chroma.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/cohere/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/cohere/cohere.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/crewai/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/gpu/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/groq/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/groq/async_groq.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/groq/groq.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/haystack/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/haystack/haystack.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/langchain/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/langchain/langchain.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/litellm/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/milvus/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/milvus/milvus.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/mistral/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/mistral/mistral.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/ollama/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/ollama/ollama.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/openai/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/openai/async_openai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/openai/openai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/transformers/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/transformers/transformers.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/vllm/__init__.py +0 -0
- {openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/vllm/vllm.py +0 -0
{openlit-1.30.0 → openlit-1.30.2}/PKG-INFO
RENAMED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.30.0
+Version: 1.30.2
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
```
{openlit-1.30.0 → openlit-1.30.2}/pyproject.toml
RENAMED

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.30.0"
+version = "1.30.2"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/__init__.py
RENAMED

```diff
@@ -379,7 +379,7 @@ def get_prompt(url=None, name=None, api_key=None, prompt_id=None,
         # Return the JSON response
         return response.json()
     except requests.RequestException as error:
-
+        logger.error("Error fetching prompt: '%s'", error)
         return None
 
 def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None):
@@ -437,7 +437,7 @@ def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None):
                     os.environ[token] = str(value)
         return vault_response
     except requests.RequestException as error:
-
+        logger.error("Error fetching secrets: '%s'", error)
         return None
 
 def trace(wrapped):
```
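Because `get_prompt` and `get_secrets` now report request failures through `logger.error` instead of falling through silently before returning `None`, a consuming application can surface those messages with ordinary logging configuration. A minimal sketch, assuming openlit's loggers follow the `logging.getLogger(__name__)` convention visible in the evals and guard utilities below; the handler setup here is illustrative, not part of openlit:

```python
import logging

# Route error-level records, including openlit's, to stderr with a
# readable format. Any equivalent handler configuration works.
logging.basicConfig(
    level=logging.ERROR,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)

# A failed fetch now produces a record such as
#   openlit ERROR Error fetching prompt: '<RequestException details>'
# while the function still returns None, as before.
```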
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/evals/utils.py
RENAMED

```diff
@@ -3,15 +3,18 @@
 
 import json
 import os
+import logging
 from typing import Optional, Tuple, List
 from pydantic import BaseModel
-
 from opentelemetry.metrics import get_meter
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
 from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion
 
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -216,7 +219,7 @@ def parse_llm_response(response) -> JsonOutput:
 
         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-
+        logger.error("Error parsing LLM response: '%s'", e)
         return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="no", evaluation="none")
 
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/guard/utils.py
RENAMED

```diff
@@ -4,6 +4,7 @@
 import re
 import json
 import os
+import logging
 from typing import Optional, Tuple
 from pydantic import BaseModel
 from opentelemetry.metrics import get_meter
@@ -12,6 +13,9 @@ from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion
 
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -158,7 +162,7 @@ def parse_llm_response(response) -> JsonOutput:
 
         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-
+        logger.error("Error parsing LLM response: '%s'", e)
         return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="none", guard="none")
 
```
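The same pattern is applied to both utility modules: each now owns a module-level logger created with `logging.getLogger(__name__)`, which resolves to `openlit.evals.utils` and `openlit.guard.utils` given the file paths above. Per-module names make the new messages individually controllable; a short sketch (the level choices are arbitrary examples):

```python
import logging

# Suppress parse-error messages from the guard utilities only,
# while leaving the evals utilities at error level.
logging.getLogger("openlit.guard.utils").setLevel(logging.CRITICAL)
logging.getLogger("openlit.evals.utils").setLevel(logging.ERROR)
```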
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py
RENAMED

```diff
@@ -195,7 +195,6 @@ def async_complete(gen_ai_endpoint, version, environment, application_name,
     with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
         response = await wrapped(*args, **kwargs)
 
-        # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
         try:
             # Format 'messages' into a single string
             message_prompt = kwargs.get("messages", "")
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py
RENAMED

```diff
@@ -195,7 +195,6 @@ def complete(gen_ai_endpoint, version, environment, application_name,
     with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
         response = wrapped(*args, **kwargs)
 
-        # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
         try:
             # Format 'messages' into a single string
             message_prompt = kwargs.get("messages", "")
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/crewai/crewai.py
RENAMED

```diff
@@ -36,9 +36,9 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
         gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the
+        application_name: Name of the application using the CrewAI Agent.
         tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of
+        pricing_info: Information used for calculating the cost of CrewAI usage.
         trace_content: Flag indicating whether to trace the actual content.
 
     Returns:
@@ -70,9 +70,9 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
             # Set base span attribues
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                               SemanticConvetion.
+                               SemanticConvetion.GEN_AI_SYSTEM_CREWAI)
             span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
-                               SemanticConvetion.
+                               SemanticConvetion.GEN_AI_TYPE_AGENT)
             span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                gen_ai_endpoint)
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py
RENAMED
```diff
@@ -164,7 +164,6 @@ def generate(gen_ai_endpoint, version, environment, application_name,
     with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
         response = wrapped(*args, **kwargs)
 
-        # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
         try:
             prompt = ""
             for arg in args:
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/litellm/async_litellm.py
RENAMED

```diff
@@ -26,9 +26,9 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
         gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the
+        application_name: Name of the application using the LiteLLM SDK.
         tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of
+        pricing_info: Information used for calculating the cost of LiteLLM usage.
         trace_content: Flag indicating whether to trace the actual content.
 
     Returns:
@@ -38,7 +38,6 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
     class TracedAsyncStream:
         """
         Wrapper for streaming responses to collect metrics and trace data.
-        Wraps the 'openai.AsyncStream' response to collect message IDs and aggregated response.
 
         This class implements the '__aiter__' and '__anext__' methods that
         handle asynchronous streaming responses.
@@ -125,7 +124,7 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
                 # Set Span attributes
                 self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                 self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                         SemanticConvetion.
+                                         SemanticConvetion.GEN_AI_SYSTEM_LITELLM)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                          SemanticConvetion.GEN_AI_TYPE_CHAT)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
@@ -185,7 +184,7 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_APPLICATION_NAME:
                             application_name,
                         SemanticConvetion.GEN_AI_SYSTEM:
-                            SemanticConvetion.
+                            SemanticConvetion.GEN_AI_SYSTEM_LITELLM,
                         SemanticConvetion.GEN_AI_ENVIRONMENT:
                             environment,
                         SemanticConvetion.GEN_AI_TYPE:
@@ -268,7 +267,7 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
             # Set base span attribues
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                               SemanticConvetion.
+                               SemanticConvetion.GEN_AI_SYSTEM_LITELLM)
             span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                SemanticConvetion.GEN_AI_TYPE_CHAT)
             span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
@@ -379,7 +378,7 @@ def acompletion(gen_ai_endpoint, version, environment, application_name,
                     SemanticConvetion.GEN_AI_APPLICATION_NAME:
                         application_name,
                     SemanticConvetion.GEN_AI_SYSTEM:
-                        SemanticConvetion.
+                        SemanticConvetion.GEN_AI_SYSTEM_LITELLM,
                     SemanticConvetion.GEN_AI_ENVIRONMENT:
                         environment,
                     SemanticConvetion.GEN_AI_TYPE:
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/instrumentation/litellm/litellm.py
RENAMED

```diff
@@ -26,9 +26,9 @@ def completion(gen_ai_endpoint, version, environment, application_name,
         gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the
+        application_name: Name of the application using the LiteLLM SDK.
         tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of
+        pricing_info: Information used for calculating the cost of LiteLLM usage.
         trace_content: Flag indicating whether to trace the actual content.
 
     Returns:
@@ -38,7 +38,6 @@ def completion(gen_ai_endpoint, version, environment, application_name,
     class TracedSyncStream:
         """
         Wrapper for streaming responses to collect metrics and trace data.
-        Wraps the 'openai.AsyncStream' response to collect message IDs and aggregated response.
 
         This class implements the '__aiter__' and '__anext__' methods that
         handle asynchronous streaming responses.
@@ -125,7 +124,7 @@ def completion(gen_ai_endpoint, version, environment, application_name,
                 # Set Span attributes
                 self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                 self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                         SemanticConvetion.
+                                         SemanticConvetion.GEN_AI_SYSTEM_LITELLM)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                          SemanticConvetion.GEN_AI_TYPE_CHAT)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
@@ -185,7 +184,7 @@ def completion(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_APPLICATION_NAME:
                             application_name,
                         SemanticConvetion.GEN_AI_SYSTEM:
-                            SemanticConvetion.
+                            SemanticConvetion.GEN_AI_SYSTEM_LITELLM,
                         SemanticConvetion.GEN_AI_ENVIRONMENT:
                             environment,
                         SemanticConvetion.GEN_AI_TYPE:
@@ -268,7 +267,7 @@ def completion(gen_ai_endpoint, version, environment, application_name,
             # Set base span attribues
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                               SemanticConvetion.
+                               SemanticConvetion.GEN_AI_SYSTEM_LITELLM)
             span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                SemanticConvetion.GEN_AI_TYPE_CHAT)
             span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
@@ -379,7 +378,7 @@ def completion(gen_ai_endpoint, version, environment, application_name,
                    SemanticConvetion.GEN_AI_APPLICATION_NAME:
                        application_name,
                    SemanticConvetion.GEN_AI_SYSTEM:
-                       SemanticConvetion.
+                       SemanticConvetion.GEN_AI_SYSTEM_LITELLM,
                    SemanticConvetion.GEN_AI_ENVIRONMENT:
                        environment,
                    SemanticConvetion.GEN_AI_TYPE:
```
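With the constants filled in, spans from both the sync and async LiteLLM paths now carry `SemanticConvetion.GEN_AI_SYSTEM_LITELLM` ("litellm") as their system attribute. A hedged usage sketch; the model name and messages are placeholders, and it assumes openlit's default `init()` activates the LiteLLM instrumentation:

```python
import openlit
import litellm

# Initialize openlit first so instrumentation is in place.
openlit.init(application_name="demo-app", environment="staging")

# Completions made through litellm are traced; under 1.30.2 the span's
# system attribute carries the new "litellm" value.
response = litellm.completion(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
)
```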
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/otel/metrics.py
RENAMED

```diff
@@ -1,4 +1,4 @@
-# pylint: disable=duplicate-code, line-too-long
+# pylint: disable=duplicate-code, line-too-long, ungrouped-imports
 """
 Setups up OpenTelemetry Meter
 """
@@ -8,10 +8,13 @@ from opentelemetry.sdk.metrics import MeterProvider
 from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter
 from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.sdk.resources import Resource
-from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
-
 from openlit.semcov import SemanticConvetion
 
+if os.environ.get("OTEL_EXPORTER_OTLP_PROTOCOL") == "grpc":
+    from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
+else:
+    from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
+
 # Global flag to check if the meter provider initialization is complete.
 METER_SET = False
 
```
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/otel/tracing.py
RENAMED

```diff
@@ -1,4 +1,4 @@
-# pylint: disable=duplicate-code, line-too-long
+# pylint: disable=duplicate-code, line-too-long, ungrouped-imports
 """
 Setups up OpenTelemetry tracer
 """
@@ -10,8 +10,11 @@ from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor
 from opentelemetry.sdk.trace.export import ConsoleSpanExporter
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 
+if os.environ.get("OTEL_EXPORTER_OTLP_PROTOCOL") == "grpc":
+    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+else:
+    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 
 # Global flag to check if the tracer provider initialization is complete.
 TRACER_SET = False
```
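Both `otel/metrics.py` and `otel/tracing.py` now pick their OTLP exporter at import time: if the standard `OTEL_EXPORTER_OTLP_PROTOCOL` environment variable equals `grpc`, the gRPC exporters are imported; otherwise the HTTP exporters remain the default. Because the branch runs at module load, the variable must be set before openlit is imported. A minimal sketch, assuming the gRPC exporter package (`opentelemetry-exporter-otlp-proto-grpc`) is installed and the collector listens on the conventional ports:

```python
import os

# Must be set before importing openlit: the exporter is chosen when the
# openlit.otel modules are first loaded, not when init() runs.
os.environ["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc"

import openlit

# 4317 is the conventional OTLP/gRPC port; OTLP/HTTP typically uses 4318.
openlit.init(otlp_endpoint="http://127.0.0.1:4317")
```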
{openlit-1.30.0 → openlit-1.30.2}/src/openlit/semcov/__init__.py
RENAMED

```diff
@@ -88,6 +88,7 @@ class SemanticConvetion:
     GEN_AI_TYPE_FINETUNING = "fine_tuning"
     GEN_AI_TYPE_VECTORDB = "vectordb"
     GEN_AI_TYPE_FRAMEWORK = "framework"
+    GEN_AI_TYPE_AGENT = "agent"
 
     GEN_AI_SYSTEM_HUGGING_FACE = "huggingface"
     GEN_AI_SYSTEM_OPENAI = "openai"
@@ -108,6 +109,8 @@ class SemanticConvetion:
     GEN_AI_SYSTEM_LLAMAINDEX = "llama_index"
     GEN_AI_SYSTEM_HAYSTACK = "haystack"
     GEN_AI_SYSTEM_EMBEDCHAIN = "embedchain"
+    GEN_AI_SYSTEM_LITELLM = "litellm"
+    GEN_AI_SYSTEM_CREWAI = "crewai"
 
     # Vector DB
     DB_REQUESTS = "db.total.requests"
```
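The three additions are plain string constants on the `SemanticConvetion` class (openlit's own spelling of the name), so their effect is easiest to see directly; the values below are taken from the diff above:

```python
from openlit.semcov import SemanticConvetion

# New in this release: an agent span type plus two system identifiers
# used by the CrewAI and LiteLLM instrumentations.
assert SemanticConvetion.GEN_AI_TYPE_AGENT == "agent"
assert SemanticConvetion.GEN_AI_SYSTEM_LITELLM == "litellm"
assert SemanticConvetion.GEN_AI_SYSTEM_CREWAI == "crewai"
```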