openlit 1.32.11__py3-none-any.whl → 1.33.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openlit/__init__.py CHANGED
@@ -52,6 +52,7 @@ from openlit.instrumentation.milvus import MilvusInstrumentor
  from openlit.instrumentation.astra import AstraInstrumentor
  from openlit.instrumentation.transformers import TransformersInstrumentor
  from openlit.instrumentation.litellm import LiteLLMInstrumentor
+ from openlit.instrumentation.together import TogetherInstrumentor
  from openlit.instrumentation.crewai import CrewAIInstrumentor
  from openlit.instrumentation.ag2 import AG2Instrumentor
  from openlit.instrumentation.multion import MultiOnInstrumentor
@@ -62,6 +63,7 @@ from openlit.instrumentation.ai21 import AI21Instrumentor
  from openlit.instrumentation.controlflow import ControlFlowInstrumentor
  from openlit.instrumentation.crawl4ai import Crawl4AIInstrumentor
  from openlit.instrumentation.firecrawl import FireCrawlInstrumentor
+ from openlit.instrumentation.letta import LettaInstrumentor
  from openlit.instrumentation.gpu import GPUInstrumentor
  import openlit.guard
  import openlit.evals
@@ -187,7 +189,6 @@ def instrument_if_available(
                  metrics_dict=config.metrics_dict,
                  disable_metrics=config.disable_metrics,
              )
-             logger.info("Instrumented %s", instrumentor_name)
          else:
              # pylint: disable=line-too-long
              logger.info("Library for %s (%s) not found. Skipping instrumentation", instrumentor_name, module_name)
@@ -262,6 +263,8 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
          "assemblyai": "assemblyai",
          "crawl4ai": "crawl4ai",
          "firecrawl": "firecrawl",
+         "letta": "letta",
+         "together": "together",
      }

      invalid_instrumentors = [
@@ -357,6 +360,8 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
          "assemblyai": AssemblyAIInstrumentor(),
          "crawl4ai": Crawl4AIInstrumentor(),
          "firecrawl": FireCrawlInstrumentor(),
+         "letta": LettaInstrumentor(),
+         "together": TogetherInstrumentor(),
      }

      # Initialize and instrument only the enabled instrumentors
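In practice, the two new integrations are picked up the same way as the existing ones: once the letta or together package is importable, openlit.init() applies the matching instrumentor from the map above. A minimal sketch, assuming an OTLP collector on localhost and the disabled_instrumentors keyword (both illustrative assumptions, not shown in this diff):

    import openlit

    # Instrument whatever is installed, including the new Letta and Together
    # integrations added in 1.33.0.
    openlit.init(
        environment="production",
        application_name="my-app",
        otlp_endpoint="http://127.0.0.1:4318",  # assumed local OTLP collector
    )

    # To opt out of a specific integration, pass its map key, e.g.:
    # openlit.init(disabled_instrumentors=["together"])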
openlit/instrumentation/letta/__init__.py ADDED
@@ -0,0 +1,77 @@
+ # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+ """Initializer of Auto Instrumentation of Letta Functions"""
+
+ from typing import Collection
+ import importlib.metadata
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from wrapt import wrap_function_wrapper
+
+ from openlit.instrumentation.letta.letta import (
+     create_agent, send_message
+ )
+
+ _instruments = ("letta >= 0.6.2",)
+
+ class LettaInstrumentor(BaseInstrumentor):
+     """
+     An instrumentor for Letta's client library.
+     """
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         application_name = kwargs.get("application_name", "default_application")
+         environment = kwargs.get("environment", "default_environment")
+         tracer = kwargs.get("tracer")
+         metrics = kwargs.get("metrics_dict")
+         pricing_info = kwargs.get("pricing_info", {})
+         trace_content = kwargs.get("trace_content", False)
+         disable_metrics = kwargs.get("disable_metrics")
+         version = importlib.metadata.version("letta")
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "LocalClient.create_agent",
+             create_agent("letta.create_agent", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "LocalClient.get_agent",
+             create_agent("letta.get_agent", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "LocalClient.send_message",
+             send_message("letta.send_message", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "RESTClient.create_agent",
+             create_agent("letta.create_agent", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "RESTClient.get_agent",
+             create_agent("letta.get_agent", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         wrap_function_wrapper(
+             "letta.client.client",
+             "RESTClient.send_message",
+             send_message("letta.send_message", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+     def _uninstrument(self, **kwargs):
+         # Proper uninstrumentation logic to revert patched methods
+         pass
openlit/instrumentation/letta/letta.py ADDED
@@ -0,0 +1,186 @@
+ # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, too-many-branches
+ """
+ Module for monitoring Letta calls.
+ """
+
+ import logging
+ from opentelemetry.trace import SpanKind, Status, StatusCode
+ from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from openlit.__helpers import (
+     handle_exception, get_chat_model_cost
+ )
+ from openlit.semcov import SemanticConvetion
+
+ # Initialize logger for logging potential issues and operations
+ logger = logging.getLogger(__name__)
+
+ def create_agent(gen_ai_endpoint, version, environment, application_name,
+                  tracer, pricing_info, trace_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for agent operations to collect metrics.
+
+     Args:
+         gen_ai_endpoint: Endpoint identifier for logging and tracing.
+         version: Version of the monitoring package.
+         environment: Deployment environment (e.g., production, staging).
+         application_name: Name of the application using the Letta Agent.
+         tracer: OpenTelemetry tracer for creating spans.
+         pricing_info: Information used for calculating the cost of Letta usage.
+         trace_content: Flag indicating whether to trace the actual content.
+
+     Returns:
+         A function that wraps the agent method to add telemetry.
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the API call to add telemetry.
+
+         This collects metrics such as execution time, cost, and token usage, and handles errors
+         gracefully, adding details to the trace for observability.
+
+         Args:
+             wrapped: The original method to be wrapped.
+             instance: The instance of the class where the original method is defined.
+             args: Positional arguments for the method.
+             kwargs: Keyword arguments for the method.
+
+         Returns:
+             The response from the original method.
+         """
+
+         # pylint: disable=line-too-long
+         with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
+             response = wrapped(*args, **kwargs)
+
+             try:
+                 # Set base span attributes
+                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                    SemanticConvetion.GEN_AI_SYSTEM_LETTA)
+                 span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                    SemanticConvetion.GEN_AI_TYPE_AGENT)
+                 span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                    gen_ai_endpoint)
+                 span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                    application_name)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ID,
+                                    response.id)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
+                                    response.name)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_INSTRUCTIONS,
+                                    response.system)
+                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                                    response.llm_config.model)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
+                                    response.agent_type)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TOOLS,
+                                    response.tool_names)
+
+                 span.set_status(Status(StatusCode.OK))
+
+                 # Return original response
+                 return response
+
+             except Exception as e:
+                 handle_exception(span, e)
+                 logger.error("Error in trace creation: %s", e)
+
+                 # Return original response
+                 return response
+
+     return wrapper
+
+ def send_message(gen_ai_endpoint, version, environment, application_name,
+                  tracer, pricing_info, trace_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for message sending to collect metrics.
+
+     Args:
+         gen_ai_endpoint: Endpoint identifier for logging and tracing.
+         version: Version of the monitoring package.
+         environment: Deployment environment (e.g., production, staging).
+         application_name: Name of the application using the Letta Agent.
+         tracer: OpenTelemetry tracer for creating spans.
+         pricing_info: Information used for calculating the cost of Letta usage.
+         trace_content: Flag indicating whether to trace the actual content.
+
+     Returns:
+         A function that wraps the message method to add telemetry.
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the API call to add telemetry.
+
+         This collects metrics such as execution time, cost, and token usage, and handles errors
+         gracefully, adding details to the trace for observability.
+
+         Args:
+             wrapped: The original method to be wrapped.
+             instance: The instance of the class where the original method is defined.
+             args: Positional arguments for the method.
+             kwargs: Keyword arguments for the method.
+
+         Returns:
+             The response from the original method.
+         """
+
+         # pylint: disable=line-too-long
+         with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
+             response = wrapped(*args, **kwargs)
+
+             try:
+                 # Calculate cost of the operation
+                 cost = get_chat_model_cost(kwargs.get("model", "gpt-4o"),
+                                            pricing_info, response.usage.prompt_tokens,
+                                            response.usage.completion_tokens)
+                 # Set base span attributes
+                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                    SemanticConvetion.GEN_AI_SYSTEM_LETTA)
+                 span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                    SemanticConvetion.GEN_AI_TYPE_AGENT)
+                 span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                    gen_ai_endpoint)
+                 span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                    application_name)
+                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_STEP_COUNT,
+                                    response.usage.step_count)
+                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                                    response.usage.prompt_tokens)
+                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                                    response.usage.completion_tokens)
+                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                    response.usage.total_tokens)
+                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                    cost)
+
+                 if trace_content:
+                     span.add_event(
+                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+                         attributes={
+                             SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("message", ""),
+                         },
+                     )
+                     span.add_event(
+                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+                         # pylint: disable=line-too-long
+                         attributes={
+                             SemanticConvetion.GEN_AI_CONTENT_COMPLETION: str(response.messages),
+                         },
+                     )
+
+                 span.set_status(Status(StatusCode.OK))
+
+                 # Return original response
+                 return response
+
+             except Exception as e:
+                 handle_exception(span, e)
+                 logger.error("Error in trace creation: %s", e)
+
+                 # Return original response
+                 return response
+
+     return wrapper
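Because the wrappers above patch LocalClient and RESTClient in place, ordinary Letta client calls produce these agent spans once openlit.init() has run. A rough usage sketch; the create_client() helper and the exact method signatures are assumptions about the Letta 0.6.x API, not part of this diff:

    import openlit
    from letta import create_client  # assumed import for Letta ~0.6.x

    openlit.init(application_name="letta-demo")

    client = create_client()                        # LocalClient, now wrapped
    agent = client.create_agent(name="helper")      # emits a "letta.create_agent" span
    reply = client.send_message(                    # emits a "letta.send_message" span
        agent_id=agent.id,
        role="user",
        message="Hello!",
    )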
openlit/instrumentation/together/__init__.py ADDED
@@ -0,0 +1,70 @@
+ # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+ """Initializer of Auto Instrumentation of Together AI Functions"""
+
+ from typing import Collection
+ import importlib.metadata
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from wrapt import wrap_function_wrapper
+
+ from openlit.instrumentation.together.together import (
+     completion, image_generate
+ )
+ from openlit.instrumentation.together.async_together import (
+     async_completion, async_image_generate
+ )
+
+ _instruments = ("together >= 1.3.5",)
+
+ class TogetherInstrumentor(BaseInstrumentor):
+     """
+     An instrumentor for Together's client library.
+     """
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         application_name = kwargs.get("application_name", "default_application")
+         environment = kwargs.get("environment", "default_environment")
+         tracer = kwargs.get("tracer")
+         metrics = kwargs.get("metrics_dict")
+         pricing_info = kwargs.get("pricing_info", {})
+         trace_content = kwargs.get("trace_content", False)
+         disable_metrics = kwargs.get("disable_metrics")
+         version = importlib.metadata.version("together")
+
+         # Sync chat completions
+         wrap_function_wrapper(
+             "together.resources.chat.completions",
+             "ChatCompletions.create",
+             completion("together.chat.completions", version, environment, application_name,
+                        tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         # Sync image generation
+         wrap_function_wrapper(
+             "together.resources.images",
+             "Images.generate",
+             image_generate("together.image.generate", version, environment, application_name,
+                            tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         # Async chat completions
+         wrap_function_wrapper(
+             "together.resources.chat.completions",
+             "AsyncChatCompletions.create",
+             async_completion("together.chat.completions", version, environment, application_name,
+                              tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+         # Async image generation
+         wrap_function_wrapper(
+             "together.resources.images",
+             "AsyncImages.generate",
+             async_image_generate("together.image.generate", version, environment, application_name,
+                                  tracer, pricing_info, trace_content, metrics, disable_metrics),
+         )
+
+     def _uninstrument(self, **kwargs):
+         # Proper uninstrumentation logic to revert patched methods
+         pass
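The Together instrumentor patches both the sync (ChatCompletions, Images) and async (AsyncChatCompletions, AsyncImages) resource classes, so either client style is traced. A rough sketch of a traced chat call; the model name is a placeholder and the client is assumed to read TOGETHER_API_KEY from the environment:

    import openlit
    from together import Together

    openlit.init(application_name="together-demo")

    client = Together()
    response = client.chat.completions.create(      # wrapped ChatCompletions.create
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # placeholder model
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)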