openlit 1.29.1__py3-none-any.whl → 1.30.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openlit/__helpers.py CHANGED
@@ -13,6 +13,20 @@ from opentelemetry.trace import Status, StatusCode
 # Set up logging
 logger = logging.getLogger(__name__)

+def response_as_dict(response):
+    """
+    Return parsed response as a dict
+    """
+    # pylint: disable=no-else-return
+    if isinstance(response, dict):
+        return response
+    if hasattr(response, "model_dump"):
+        return response.model_dump()
+    elif hasattr(response, "parse"):
+        return response_as_dict(response.parse())
+    else:
+        return response
+
 def get_env_variable(name, arg_value, error_message):
     """
     Retrieve an environment variable if the argument is not provided
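
The new response_as_dict helper normalizes whatever a client returns — a plain dict, a Pydantic model exposing model_dump(), or a lazily-parsed wrapper exposing parse() — into one dict shape. A minimal usage sketch, not part of the diff (the OpenAI call is illustrative):

    from openai import OpenAI
    from openlit.__helpers import response_as_dict

    client = OpenAI()
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
    )
    # Pydantic response objects go through model_dump(); dicts pass through unchanged.
    data = response_as_dict(response)
    print(data["choices"][0]["message"]["content"])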
openlit/__init__.py CHANGED
@@ -46,6 +46,8 @@ from openlit.instrumentation.pinecone import PineconeInstrumentor
 from openlit.instrumentation.qdrant import QdrantInstrumentor
 from openlit.instrumentation.milvus import MilvusInstrumentor
 from openlit.instrumentation.transformers import TransformersInstrumentor
+from openlit.instrumentation.litellm import LiteLLMInstrumentor
+from openlit.instrumentation.crewai import CrewAIInstrumentor
 from openlit.instrumentation.gpu import GPUInstrumentor
 import openlit.guard
 import openlit.evals
@@ -228,6 +230,8 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "qdrant": "qdrant_client",
         "milvus": "pymilvus",
         "transformers": "transformers",
+        "litellm": "litellm",
+        "crewai": "crewai",
     }

     invalid_instrumentors = [
@@ -305,6 +309,8 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "qdrant": QdrantInstrumentor(),
         "milvus": MilvusInstrumentor(),
         "transformers": TransformersInstrumentor(),
+        "litellm": LiteLLMInstrumentor(),
+        "crewai": CrewAIInstrumentor(),
     }

     # Initialize and instrument only the enabled instrumentors
@@ -350,7 +356,8 @@ def get_prompt(url=None, name=None, api_key=None, prompt_id=None,
         'version': version,
         'shouldCompile': should_compile,
         'variables': variables,
-        'metaProperties': meta_properties
+        'metaProperties': meta_properties,
+        'source': 'python-sdk'
     }

     # Remove None values from payload
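
The extra 'source' field tags requests as coming from the Python SDK, and the None-filtering mentioned above means optional fields drop out before the request is sent. A sketch of the resulting payload handling (values are illustrative):

    payload = {
        'name': 'greeting',
        'shouldCompile': True,
        'variables': {'user': 'Ada'},
        'metaProperties': None,
        'source': 'python-sdk',
    }
    # Same None-filtering the surrounding code performs before POSTing
    payload = {k: v for k, v in payload.items() if v is not None}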
@@ -372,7 +379,7 @@ def get_prompt(url=None, name=None, api_key=None, prompt_id=None,
         # Return the JSON response
         return response.json()
     except requests.RequestException as error:
-        print(f"Error fetching prompt: {error}")
+        logger.error("Error fetching prompt: '%s'", error)
         return None

 def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None):
@@ -401,6 +408,7 @@ def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None
     payload = {
         'key': key,
         'tags': tags,
+        'source': 'python-sdk'
     }

     # Remove None values from payload
@@ -429,7 +437,7 @@ def get_secrets(url=None, api_key=None, key=None, tags=None, should_set_env=None
                 os.environ[token] = str(value)
         return vault_response
     except requests.RequestException as error:
-        print(f"Error fetching secrets: {error}")
+        logger.error("Error fetching secrets: '%s'", error)
         return None

 def trace(wrapped):
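
Since "litellm" and "crewai" are now valid registry keys, they participate in the same opt-out mechanism as the other instrumentors. A hedged sketch of disabling them (parameter name as used by openlit.init's invalid-instrumentor check above):

    import openlit

    # Keys must match the module map above; unknown names are reported as invalid.
    openlit.init(
        application_name="demo-app",
        environment="dev",
        disabled_instrumentors=["litellm", "crewai"],
    )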
openlit/evals/utils.py CHANGED
@@ -3,15 +3,18 @@

 import json
 import os
+import logging
 from typing import Optional, Tuple, List
 from pydantic import BaseModel
-
 from opentelemetry.metrics import get_meter
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
 from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion

+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -216,7 +219,7 @@ def parse_llm_response(response) -> JsonOutput:

         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-        print(f"Error parsing LLM response: {e}")
+        logger.error("Error parsing LLM response: '%s'", e)
         return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="no", evaluation="none")

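Because these errors now go through logging instead of print, they stay silent unless the host application configures a handler. A minimal sketch:

    import logging

    # Surface openlit's error-level messages (e.g. LLM-response parse failures)
    logging.basicConfig(level=logging.ERROR)
    # Or target just this module's logger:
    logging.getLogger("openlit.evals.utils").setLevel(logging.ERROR)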
openlit/guard/utils.py CHANGED
@@ -4,6 +4,7 @@
 import re
 import json
 import os
+import logging
 from typing import Optional, Tuple
 from pydantic import BaseModel
 from opentelemetry.metrics import get_meter
@@ -12,6 +13,9 @@ from anthropic import Anthropic
 from openai import OpenAI
 from openlit.semcov import SemanticConvetion

+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
 class JsonOutput(BaseModel):
     """
     A model representing the structure of JSON output for prompt injection detection.
@@ -158,7 +162,7 @@ def parse_llm_response(response) -> JsonOutput:

         return JsonOutput(**data)
     except (json.JSONDecodeError, TypeError) as e:
-        print(f"Error parsing LLM response: {e}")
+        logger.error("Error parsing LLM response: '%s'", e)
         return JsonOutput(score=0, classification="none", explanation="none",
                           verdict="none", guard="none")

@@ -195,7 +195,6 @@ def async_complete(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = await wrapped(*args, **kwargs)

-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 # Format 'messages' into a single string
                 message_prompt = kwargs.get("messages", "")
@@ -195,7 +195,6 @@ def complete(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = wrapped(*args, **kwargs)

-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 # Format 'messages' into a single string
                 message_prompt = kwargs.get("messages", "")
openlit/instrumentation/crewai/__init__.py ADDED
@@ -0,0 +1,50 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of CrewAI Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.crewai.crewai import (
+    crew_wrap
+)
+
+_instruments = ("crewai >= 0.80.0",)
+
+class CrewAIInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for CrewAI's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("crewai")
+
+        wrap_function_wrapper(
+            "crewai.agent",
+            "Agent.execute_task",
+            crew_wrap("crewai.agent_execute_task", version, environment, application_name,
+                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "crewai.task",
+            "Task._execute_core",
+            crew_wrap("crewai.task_execute_core", version, environment, application_name,
+                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
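
Both hooks use wrapt's wrap_function_wrapper, which swaps a dotted attribute on an imported module for a proxy that passes (wrapped, instance, args, kwargs) to the factory's inner function. A stripped-down sketch of the same pattern against a hypothetical target:

    from wrapt import wrap_function_wrapper

    def log_wrap(endpoint):
        def wrapper(wrapped, instance, args, kwargs):
            # Call through to the original method; the real wrappers open a span here.
            print(f"entering {endpoint}")
            return wrapped(*args, **kwargs)
        return wrapper

    # Same shape as the CrewAI hooks above: module path, dotted attribute, wrapper factory.
    wrap_function_wrapper("mypkg.agent", "Agent.run", log_wrap("mypkg.agent_run"))

Here mypkg and Agent.run are placeholders; Agent.execute_task and Task._execute_core are patched the same way.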
openlit/instrumentation/crewai/crewai.py ADDED
@@ -0,0 +1,149 @@
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, too-many-branches
+"""
+Module for monitoring CrewAI calls.
+"""
+
+import logging
+import json
+from opentelemetry.trace import SpanKind, Status, StatusCode
+from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from openlit.__helpers import (
+    handle_exception,
+)
+from openlit.semcov import SemanticConvetion
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def _parse_tools(tools):
+    result = []
+    for tool in tools:
+        res = {}
+        if hasattr(tool, "name") and tool.name is not None:
+            res["name"] = tool.name
+        if hasattr(tool, "description") and tool.description is not None:
+            res["description"] = tool.description
+        if res:
+            result.append(res)
+    return json.dumps(result)
+
+def crew_wrap(gen_ai_endpoint, version, environment, application_name,
+              tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for chat completions to collect metrics.
+
+    Args:
+        gen_ai_endpoint: Endpoint identifier for logging and tracing.
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the CrewAI Agent.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of CrewAI usage.
+        trace_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the chat completions method to add telemetry.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the 'chat.completions' API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original 'chat.completions' method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the 'chat.completions' method.
+            kwargs: Keyword arguments for the 'chat.completions' method.
+
+        Returns:
+            The response from the original 'chat.completions' method.
+        """
+
+        # pylint: disable=line-too-long
+        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+            response = wrapped(*args, **kwargs)
+
+            try:
+                # Set base span attributes
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                   SemanticConvetion.GEN_AI_SYSTEM_CREWAI)
+                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                   SemanticConvetion.GEN_AI_TYPE_AGENT)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                   gen_ai_endpoint)
+
+                instance_class = instance.__class__.__name__
+
+                if instance_class == "Task":
+                    task = {}
+                    for key, value in instance.__dict__.items():
+                        if value is None:
+                            continue
+                        if key == "tools":
+                            value = _parse_tools(value)
+                            task[key] = value
+                        elif key == "agent":
+                            task[key] = value.role
+                        else:
+                            task[key] = str(value)
+
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TASK_ID,
+                                       task.get('id', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TASK,
+                                       task.get('description', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_EXPECTED_OUTPUT,
+                                       task.get('expected_output', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ACTUAL_OUTPUT,
+                                       task.get('output', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_HUMAN_INPUT,
+                                       task.get('human_input', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TASK_ASSOCIATION,
+                                       str(task.get('processed_by_agents', '')))
+
+                elif instance_class == "Agent":
+                    agent = {}
+                    for key, value in instance.__dict__.items():
+                        if key == "tools":
+                            value = _parse_tools(value)
+                        if value is None:
+                            continue
+                        agent[key] = str(value)
+
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ID,
+                                       agent.get('id', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
+                                       agent.get('role', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_GOAL,
+                                       agent.get('goal', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_CONTEXT,
+                                       agent.get('backstory', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ENABLE_CACHE,
+                                       agent.get('cache', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ALLOW_DELEGATION,
+                                       agent.get('allow_delegation', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ALLOW_CODE_EXECUTION,
+                                       agent.get('allow_code_execution', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_MAX_RETRY_LIMIT,
+                                       agent.get('max_retry_limit', ''))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TOOLS,
+                                       str(agent.get('tools', '')))
+                    span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TOOL_RESULTS,
+                                       str(agent.get('tools_results', '')))
+
+                span.set_status(Status(StatusCode.OK))
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
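
_parse_tools keeps only each tool's name and description so the span attribute stays compact, and it drops tools with neither field set. A quick illustration using duck-typed stand-ins for CrewAI tools:

    from types import SimpleNamespace

    tools = [
        SimpleNamespace(name="search", description="Web search tool"),
        SimpleNamespace(name=None, description=None),  # skipped: nothing to record
    ]
    print(_parse_tools(tools))
    # -> [{"name": "search", "description": "Web search tool"}]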
@@ -164,7 +164,6 @@ def generate(gen_ai_endpoint, version, environment, application_name,
         with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
             response = wrapped(*args, **kwargs)

-            # print(instance._system_instruction.__dict__["_pb"].parts[0].text)
             try:
                 prompt = ""
                 for arg in args:
openlit/instrumentation/litellm/__init__.py ADDED
@@ -0,0 +1,54 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of LiteLLM Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.litellm.litellm import (
+    completion
+)
+from openlit.instrumentation.litellm.async_litellm import (
+    acompletion
+)
+
+_instruments = ("litellm >= 1.52.6",)
+
+class LiteLLMInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for LiteLLM's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("litellm")
+
+        # completion
+        wrap_function_wrapper(
+            "litellm",
+            "completion",
+            completion("litellm.completion", version, environment, application_name,
+                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "litellm",
+            "acompletion",
+            acompletion("litellm.completion", version, environment, application_name,
+                        tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
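
Once the instrumentor is active, both entry points emit spans without any change to calling code. A hedged end-to-end sketch (model name and prompt are illustrative):

    import asyncio
    import litellm
    import openlit

    openlit.init(application_name="demo-app")

    # Sync path: the wrapped litellm.completion
    litellm.completion(model="gpt-4o-mini",
                       messages=[{"role": "user", "content": "ping"}])

    async def main():
        # Async path: the wrapped litellm.acompletion
        await litellm.acompletion(model="gpt-4o-mini",
                                  messages=[{"role": "user", "content": "ping"}])

    asyncio.run(main())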