openlit 1.32.11__py3-none-any.whl → 1.32.12__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- openlit/__init__.py +3 -0
- openlit/instrumentation/letta/__init__.py +77 -0
- openlit/instrumentation/letta/letta.py +186 -0
- openlit/semcov/__init__.py +1 -0
- {openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/METADATA +2 -2
- {openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/RECORD +8 -6
- {openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/LICENSE +0 -0
- {openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/WHEEL +0 -0
openlit/__init__.py
CHANGED
@@ -62,6 +62,7 @@ from openlit.instrumentation.ai21 import AI21Instrumentor
 from openlit.instrumentation.controlflow import ControlFlowInstrumentor
 from openlit.instrumentation.crawl4ai import Crawl4AIInstrumentor
 from openlit.instrumentation.firecrawl import FireCrawlInstrumentor
+from openlit.instrumentation.letta import LettaInstrumentor
 from openlit.instrumentation.gpu import GPUInstrumentor
 import openlit.guard
 import openlit.evals
@@ -262,6 +263,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "assemblyai": "assemblyai",
         "crawl4ai": "crawl4ai",
         "firecrawl": "firecrawl",
+        "letta": "letta",
     }

     invalid_instrumentors = [
@@ -357,6 +359,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "assemblyai": AssemblyAIInstrumentor(),
         "crawl4ai": Crawl4AIInstrumentor(),
         "firecrawl": FireCrawlInstrumentor(),
+        "letta": LettaInstrumentor(),
     }

     # Initialize and instrument only the enabled instrumentors
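For reference, nothing extra is needed to turn the new instrumentation on: once the `letta` package is installed, `init()` registers `LettaInstrumentor` alongside the others. A minimal sketch follows; the collector URL is illustrative, and `otlp_endpoint` assumes the full name of the truncated `otlp_en…` parameter in the `init()` signature above.

```python
# Minimal sketch: initializing OpenLIT so the new Letta instrumentor is active.
# The endpoint URL is illustrative; "otlp_endpoint" assumes the full name of the
# truncated "otlp_en..." parameter shown in the init() signature above.
import openlit

openlit.init(
    environment="staging",
    application_name="letta-demo",
    otlp_endpoint="http://127.0.0.1:4318",
)
# Subsequent letta.client.client LocalClient / RESTClient calls
# (create_agent, get_agent, send_message) are now traced.
```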
openlit/instrumentation/letta/__init__.py
ADDED
@@ -0,0 +1,77 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of Letta Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.letta.letta import (
+    create_agent, send_message
+)
+
+_instruments = ("letta >= 0.6.2",)
+
+class LettaInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for Letta's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("letta")
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "LocalClient.create_agent",
+            create_agent("letta.create_agent", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "LocalClient.get_agent",
+            create_agent("letta.get_agent", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "LocalClient.send_message",
+            send_message("letta.send_message", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "RESTClient.create_agent",
+            create_agent("letta.create_agent", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "RESTClient.get_agent",
+            create_agent("letta.get_agent", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "letta.client.client",
+            "RESTClient.send_message",
+            send_message("letta.send_message", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
openlit/instrumentation/letta/letta.py
ADDED
@@ -0,0 +1,186 @@
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, too-many-branches
+"""
+Module for monitoring Letta calls.
+"""
+
+import logging
+from opentelemetry.trace import SpanKind, Status, StatusCode
+from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from openlit.__helpers import (
+    handle_exception, get_chat_model_cost
+)
+from openlit.semcov import SemanticConvetion
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def create_agent(gen_ai_endpoint, version, environment, application_name,
+                 tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for chat completions to collect metrics.
+
+    Args:
+        gen_ai_endpoint: Endpoint identifier for logging and tracing.
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the Letta Agent.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of Letta usage.
+        trace_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the chat completions method to add telemetry.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the method.
+            kwargs: Keyword arguments for the method.
+
+        Returns:
+            The response from the original method.
+        """
+
+        # pylint: disable=line-too-long
+        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
+            response = wrapped(*args, **kwargs)
+
+            try:
+                # Set base span attributes
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                   SemanticConvetion.GEN_AI_SYSTEM_LETTA)
+                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                   SemanticConvetion.GEN_AI_TYPE_AGENT)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                   gen_ai_endpoint)
+                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                   application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ID,
+                                   response.id)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
+                                   response.name)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_INSTRUCTIONS,
+                                   response.system)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                                   response.llm_config.model)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
+                                   response.agent_type)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TOOLS,
+                                   response.tool_names)
+
+                span.set_status(Status(StatusCode.OK))
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
+
+def send_message(gen_ai_endpoint, version, environment, application_name,
+                 tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for chat completions to collect metrics.
+
+    Args:
+        gen_ai_endpoint: Endpoint identifier for logging and tracing.
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the Letta Agent.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of Letta usage.
+        trace_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the chat completions method to add telemetry.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the method.
+            kwargs: Keyword arguments for the method.
+
+        Returns:
+            The response from the original method.
+        """
+
+        # pylint: disable=line-too-long
+        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
+            response = wrapped(*args, **kwargs)
+
+            try:
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(kwargs.get("model", "gpt-4o"),
+                                           pricing_info, response.usage.prompt_tokens,
+                                           response.usage.completion_tokens)
+                # Set base span attributes
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                   SemanticConvetion.GEN_AI_SYSTEM_LETTA)
+                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                   SemanticConvetion.GEN_AI_TYPE_AGENT)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                   gen_ai_endpoint)
+                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                   application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_AGENT_STEP_COUNT,
+                                   response.usage.step_count)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                                   response.usage.prompt_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                                   response.usage.completion_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                   response.usage.total_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                   cost)
+
+                if trace_content:
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+                        attributes={
+                            SemanticConvetion.GEN_AI_CONTENT_PROMPT: kwargs.get("message", ""),
+                        },
+                    )
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+                        # pylint: disable=line-too-long
+                        attributes={
+                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION: str(response.messages),
+                        },
+                    )
+
+                span.set_status(Status(StatusCode.OK))
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
openlit/semcov/__init__.py
CHANGED
@@ -133,6 +133,7 @@ class SemanticConvetion:
     GEN_AI_SYSTEM_ASSEMBLYAI = "assemblyai"
     GEN_AI_SYSTEM_CRAWL4AI = "crawl4ai"
     GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
+    GEN_AI_SYSTEM_LETTA = "letta"

     # Vector DB
     DB_OPERATION_API_ENDPOINT = "db.operation.api_endpoint"
{openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.32.11
+Version: 1.32.12
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -82,7 +82,7 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
 | [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | [✅ ControlFlow](https://docs.openlit.io/latest/integrations/controlflow) | |
 | [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | [✅ Crawl4AI](https://docs.openlit.io/latest/integrations/crawl4ai) | |
 | [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | [✅ FireCrawl](https://docs.openlit.io/latest/integrations/firecrawl) | |
-| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
+| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | [✅ Letta](https://docs.openlit.io/latest/integrations/letta) | |
 | [✅ NVIDIA NIM](https://docs.openlit.io/latest/integrations/nvidia-nim) | | | |
 | [✅ Titan ML](https://docs.openlit.io/latest/integrations/titan-ml) | | | |
 | [✅ Reka AI](https://docs.openlit.io/latest/integrations/reka) | | | |
{openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 openlit/__helpers.py,sha256=bqMxdNndLW5NGO2wwpAoHEOnAFr_mhnmVLua3ifpSEc,6427
-openlit/__init__.py,sha256=
+openlit/__init__.py,sha256=VBmUj1L-HKJnSIV6Ga1YHPgxdLZbNPj6T6DDIkdvzOM,21870
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
 openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154
@@ -66,6 +66,8 @@ openlit/instrumentation/julep/async_julep.py,sha256=OO8lIm9uUV1lhPo_klKBVyaDwgHh
 openlit/instrumentation/julep/julep.py,sha256=lDUmkEP4hXk7vgUUbNRD-mnfdfrZifXSFVVILs8Ttkw,5276
 openlit/instrumentation/langchain/__init__.py,sha256=gVtPZJifx-H8rqdZlU3GXdy3NtRF8yVb7PW7gE-ddJk,3592
 openlit/instrumentation/langchain/langchain.py,sha256=XzZ3AH5Ep-UwMlIWVohXaGcZmuDYfUDcc4yeL4HTXvk,37860
+openlit/instrumentation/letta/__init__.py,sha256=sjjOuMvZ1EPGEluNW0WTuSXYPhrb453cBIizt88Ta3g,2951
+openlit/instrumentation/letta/letta.py,sha256=V_RLcGPy3Y9shxeDEtaDu7dHMnDWA08ijxWAZuQPQWg,8292
 openlit/instrumentation/litellm/__init__.py,sha256=Z-LsVHKJdPganHfJA_rWg7xAfQYkvLfpLdF-eckU4qY,2401
 openlit/instrumentation/litellm/async_litellm.py,sha256=1MKNZbvKaf1lFWbXi1MQy3qFNNeXawav34SDlOQ_H3w,27544
 openlit/instrumentation/litellm/litellm.py,sha256=4YqCQ4CEQ4sfDu7pTlnflL_AfUqYEQdJDTO7nHJ6noY,27450
@@ -110,8 +112,8 @@ openlit/instrumentation/vllm/__init__.py,sha256=OVWalQ1dXvip1DUsjUGaHX4J-2FrSp-T
 openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOsOGN7Wd8,6527
 openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
 openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
-openlit/semcov/__init__.py,sha256=
-openlit-1.32.
-openlit-1.32.
-openlit-1.32.
-openlit-1.32.
+openlit/semcov/__init__.py,sha256=F30Ki_08YjPrMe73kjp5sulC0qxHp9e-VExbzAOM1YI,10935
+openlit-1.32.12.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.32.12.dist-info/METADATA,sha256=hHrHGhViseJ9Cj6kyqrmMSIeudiOoQ93Mrbj_mnF8rQ,22735
+openlit-1.32.12.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+openlit-1.32.12.dist-info/RECORD,,
{openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/LICENSE
File without changes
{openlit-1.32.11.dist-info → openlit-1.32.12.dist-info}/WHEEL
File without changes