openlit 1.30.1.tar.gz → 1.30.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. {openlit-1.30.1 → openlit-1.30.2}/PKG-INFO +1 -1
  2. {openlit-1.30.1 → openlit-1.30.2}/pyproject.toml +1 -1
  3. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/__init__.py +2 -2
  4. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/utils.py +5 -2
  5. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/utils.py +5 -1
  6. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -1
  7. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -1
  8. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -1
  9. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/otel/metrics.py +6 -3
  10. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/otel/tracing.py +5 -2
  11. {openlit-1.30.1 → openlit-1.30.2}/LICENSE +0 -0
  12. {openlit-1.30.1 → openlit-1.30.2}/README.md +0 -0
  13. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/__helpers.py +0 -0
  14. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/__init__.py +0 -0
  15. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/all.py +0 -0
  16. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/bias_detection.py +0 -0
  17. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/hallucination.py +0 -0
  18. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/toxicity.py +0 -0
  19. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/__init__.py +0 -0
  20. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/all.py +0 -0
  21. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/prompt_injection.py +0 -0
  22. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/restrict_topic.py +0 -0
  23. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/sensitive_topic.py +0 -0
  24. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  25. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  26. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  27. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  28. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  29. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  30. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  31. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  32. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  33. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  34. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  35. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/crewai/crewai.py +0 -0
  36. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  37. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  38. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  39. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  40. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  41. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  42. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
  43. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  44. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  45. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  46. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/groq/__init__.py +0 -0
  47. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  48. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/groq/groq.py +0 -0
  49. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  50. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  51. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  52. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  53. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  54. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
  55. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/litellm/litellm.py +0 -0
  56. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  57. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  58. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  59. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  60. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  61. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  62. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  63. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  64. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  65. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  66. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/openai/__init__.py +0 -0
  67. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
  68. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  69. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
  70. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/openai/openai.py +0 -0
  71. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  72. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  73. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  74. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
  75. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  76. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  77. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  78. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  79. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  80. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  81. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  82. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  83. {openlit-1.30.1 → openlit-1.30.2}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.30.1 → openlit-1.30.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.30.1
+Version: 1.30.2
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.30.1 → openlit-1.30.2}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.30.1"
+version = "1.30.2"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/__init__.py
@@ -379,7 +379,7 @@ def get_prompt(url=None, name=None, api_key=None, prompt_id=None,
         # Return the JSON response
         return response.json()
     except requests.RequestException as error:
-        print(f"Error fetching prompt: {error}")
+        logger.error("Error fetching prompt: '%s'", error)
         return None
 
 def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None):
@@ -437,7 +437,7 @@ def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None
                 os.environ[token] = str(value)
         return vault_response
     except requests.RequestException as error:
-        print(f"Error fetching secrets: {error}")
+        logger.error("Error fetching secrets: '%s'", error)
         return None
 
 def trace(wrapped):
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/evals/utils.py
@@ -3,15 +3,18 @@
 
 import json
 import os
+import logging
 from typing import Optional, Tuple, List
 from pydantic import BaseModel
-
 from opentelemetry.metrics import get_meter
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
 from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion
 
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -216,7 +219,7 @@ def parse_llm_response(response) -> JsonOutput:
 
         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-        print(f"Error parsing LLM response: {e}")
+        logger.error("Error parsing LLM response: '%s'", e)
         return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="no", evaluation="none")
 
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/guard/utils.py
@@ -4,6 +4,7 @@
 import re
 import json
 import os
+import logging
 from typing import Optional, Tuple
 from pydantic import BaseModel
 from opentelemetry.metrics import get_meter
@@ -12,6 +13,9 @@ from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion
 
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -158,7 +162,7 @@ def parse_llm_response(response) -> JsonOutput:
 
         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-        print(f"Error parsing LLM response: {e}")
+        logger.error("Error parsing LLM response: '%s'", e)
        return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="none", guard="none")
 
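A recurring change in this release: src/openlit/__init__.py, src/openlit/evals/utils.py, and src/openlit/guard/utils.py now report errors through a module-level logger (logging.getLogger(__name__)) instead of bare print() calls, so the messages carry severity and logger names and can be routed like any other Python logging output. Below is a minimal sketch of how a consuming application might capture them; the handler configuration is illustrative, not something openlit sets up itself:

    import logging

    # openlit's loggers are namespaced by module, so configuring the root
    # logger is enough to capture the new error messages.
    logging.basicConfig(
        level=logging.ERROR,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )

    # Or adjust verbosity for just the modules touched in this release:
    logging.getLogger("openlit.guard.utils").setLevel(logging.ERROR)
    logging.getLogger("openlit.evals.utils").setLevel(logging.ERROR)

Even with no logging configuration at all, ERROR-level records still reach stderr through Python's last-resort handler, so these messages remain at least as visible as the old print() output.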
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py
@@ -195,7 +195,6 @@ def async_complete(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = await wrapped(*args, **kwargs)
 
-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 # Format 'messages' into a single string
                 message_prompt = kwargs.get("messages", "")
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py
@@ -195,7 +195,6 @@ def complete(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = wrapped(*args, **kwargs)
 
-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 # Format 'messages' into a single string
                 message_prompt = kwargs.get("messages", "")
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py
@@ -164,7 +164,6 @@ def generate(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = wrapped(*args, **kwargs)
 
-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 prompt = ""
                 for arg in args:
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/otel/metrics.py
@@ -1,4 +1,4 @@
-# pylint: disable=duplicate-code, line-too-long
+# pylint: disable=duplicate-code, line-too-long, ungrouped-imports
 """
 Setups up OpenTelemetry Meter
 """
@@ -8,10 +8,13 @@ from opentelemetry.sdk.metrics import MeterProvider
 from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter
 from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.sdk.resources import Resource
-from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
-
 from openlit.semcov import SemanticConvetion
 
+if os.environ.get("OTEL_EXPORTER_OTLP_PROTOCOL") == "grpc":
+    from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
+else:
+    from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
+
 # Global flag to check if the meter provider initialization is complete.
 METER_SET = False
 
{openlit-1.30.1 → openlit-1.30.2}/src/openlit/otel/tracing.py
@@ -1,4 +1,4 @@
-# pylint: disable=duplicate-code, line-too-long
+# pylint: disable=duplicate-code, line-too-long, ungrouped-imports
 """
 Setups up OpenTelemetry tracer
 """
@@ -10,8 +10,11 @@ from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor
 from opentelemetry.sdk.trace.export import ConsoleSpanExporter
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 
+if os.environ.get("OTEL_EXPORTER_OTLP_PROTOCOL") == "grpc":
+    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+else:
+    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 
 # Global flag to check if the tracer provider initialization is complete.
 TRACER_SET = False
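Both src/openlit/otel/metrics.py and src/openlit/otel/tracing.py now choose the OTLP exporter class at import time: when the standard OTEL_EXPORTER_OTLP_PROTOCOL environment variable is set to "grpc" they import the gRPC exporters (shipped in the opentelemetry-exporter-otlp-proto-grpc package), and otherwise fall back to the HTTP/protobuf exporters used in 1.30.1. A sketch of opting into gRPC export; the endpoint value is illustrative, and openlit.init() is the package's usual entry point:

    import os

    # Must be set before openlit is imported: the exporter class is picked
    # once, when openlit.otel.tracing / openlit.otel.metrics first load.
    os.environ["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc"

    import openlit

    # 4317 is the conventional OTLP/gRPC collector port; adjust as needed.
    openlit.init(otlp_endpoint="http://localhost:4317")

Because the branch is evaluated once at module import, changing the environment variable afterwards has no effect on an already-initialized process.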