openlit 0.0.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +1 -1
- openlit/__init__.py +40 -16
- openlit/instrumentation/cohere/__init__.py +1 -1
- openlit/instrumentation/cohere/cohere.py +12 -12
- openlit/instrumentation/langchain/__init__.py +1 -2
- {openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/METADATA +69 -7
- {openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/RECORD +9 -9
- {openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/LICENSE +0 -0
- {openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/WHEEL +0 -0
openlit/__helpers.py
CHANGED
```diff
@@ -124,7 +124,7 @@ def get_audio_model_cost(model, pricing_info, prompt):
 
 def fetch_pricing_info():
     """Fetches pricing information from a specified URL."""
-    pricing_url = "https://raw.githubusercontent.com/
+    pricing_url = "https://raw.githubusercontent.com/openlit/openlit/main/assets/pricing.json"
     try:
        # Set a timeout of 10 seconds for both the connection and the read
        response = requests.get(pricing_url, timeout=20)
```
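For context, the pattern shown above — fetching a remote pricing JSON with an explicit request timeout and tolerating failure — can be sketched roughly as follows. This is an illustrative sketch, not code from the package; the helper name and the empty-dict fallback are assumptions.

```python
import requests

PRICING_URL = "https://raw.githubusercontent.com/openlit/openlit/main/assets/pricing.json"

def fetch_pricing_info_sketch():
    """Illustrative only: fetch the pricing table, guarding against slow or failed requests."""
    try:
        # timeout applies to both connecting and reading the response
        response = requests.get(PRICING_URL, timeout=20)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as err:
        # Assumption: callers can tolerate an empty pricing table when the fetch fails.
        print(f"Could not fetch pricing info: {err}")
        return {}
```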
openlit/__init__.py
CHANGED
```diff
@@ -3,8 +3,9 @@ The __init__.py module for the openLIT package.
 This module sets up the openLIT configuration and instrumentation for various
 large language models (LLMs).
 """
-from typing import
+from typing import Dict
 import logging
+from importlib.util import find_spec
 
 # Import internal modules for setting up tracing and fetching pricing info.
 from openlit.otel.tracing import setup_tracing
```
```diff
@@ -91,6 +92,30 @@ class OpenlitConfig:
         cls.trace_content = trace_content
         cls.disable_metrics = disable_metrics
 
+def instrument_if_available(instrumentor_name, instrumentor_instance, config,
+                            disabled_instrumentors, module_name_map):
+    """Instruments the specified instrumentor if its library is available."""
+    if instrumentor_name in disabled_instrumentors:
+        return
+
+    module_name = module_name_map.get(instrumentor_name)
+
+    if not module_name or find_spec(module_name) is not None:
+        try:
+            instrumentor_instance.instrument(
+                environment=config.environment,
+                application_name=config.application_name,
+                tracer=config.tracer,
+                pricing_info=config.pricing_info,
+                trace_content=config.trace_content,
+                metrics_dict=config.metrics_dict,
+                disable_metrics=config.disable_metrics
+            )
+
+        # pylint: disable=broad-exception-caught
+        except Exception as e:
+            logger.error("Failed to instrument %s: %s", instrumentor_name, e)
+
 def init(environment="default", application_name="default", tracer=None, otlp_endpoint=None,
          otlp_headers=None, disable_batch=False, trace_content=True, disabled_instrumentors=None,
          meter=None, disable_metrics=False):
```
```diff
@@ -115,12 +140,19 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
     disabled_instrumentors = disabled_instrumentors if disabled_instrumentors else []
 
     # Check for invalid instrumentor names
-
-
-    "
-    "
+
+    module_name_map = {
+        "openai": "openai",
+        "anthropic": "anthropic",
+        "cohere": "cohere",
+        "mistral": "mistralai",
+        "langchain": "langchain",
+        "chroma": "chromadb",
+        "pinecone": "pincone",
+        "transformers": "transformers"
     }
-
+
+    invalid_instrumentors = [name for name in disabled_instrumentors if name not in module_name_map]
     for invalid_name in invalid_instrumentors:
         logger.warning("Invalid instrumentor name detected and ignored: '%s'", invalid_name)
 
```
```diff
@@ -168,16 +200,8 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
 
         # Initialize and instrument only the enabled instrumentors
         for name, instrumentor in instrumentor_instances.items():
-
-
-                    environment=config.environment,
-                    application_name=config.application_name,
-                    tracer=config.tracer,
-                    pricing_info=config.pricing_info,
-                    trace_content=config.trace_content,
-                    metrics_dict=config.metrics_dict,
-                    disable_metrics=config.disable_metrics
-                )
+            instrument_if_available(name, instrumentor, config,
+                                    disabled_instrumentors, module_name_map)
 
     # pylint: disable=broad-exception-caught
     except Exception as e:
```
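The new `instrument_if_available` helper gates each instrumentor on whether its underlying library is importable. A minimal standalone sketch of that gating pattern follows; the mapping contents and function name here are hypothetical stand-ins mirroring the `module_name_map` and `find_spec` check introduced above.

```python
from importlib.util import find_spec

# Hypothetical subset of the instrumentor-name -> importable-module map from openlit/__init__.py.
MODULE_NAME_MAP = {"openai": "openai", "chroma": "chromadb"}

def should_instrument(name, disabled):
    """Return True only if the instrumentor is enabled and its library is installed."""
    if name in disabled:
        return False
    module_name = MODULE_NAME_MAP.get(name)
    # find_spec returns None when the module cannot be located on the import path.
    return module_name is None or find_spec(module_name) is not None

print(should_instrument("openai", disabled=[]))          # True only if `openai` is installed
print(should_instrument("chroma", disabled=["chroma"]))  # False: explicitly disabled
```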
openlit/instrumentation/cohere/__init__.py
CHANGED
```diff
@@ -7,7 +7,7 @@ from wrapt import wrap_function_wrapper
 
 from openlit.instrumentation.cohere.cohere import chat, chat_stream, embed
 
-_instruments = ("cohere >= 5.
+_instruments = ("cohere >= 5.3.2",)
 
 class CohereInstrumentor(BaseInstrumentor):
     """An instrumentor for Cohere's client library."""
```
openlit/instrumentation/cohere/cohere.py
CHANGED
```diff
@@ -173,8 +173,8 @@ def chat(gen_ai_endpoint, version, environment, application_name, tracer,
             # Calculate cost of the operation
             cost = get_chat_model_cost(kwargs.get("model", "command"),
                                        pricing_info,
-                                       response.meta
-                                       response.meta
+                                       response.meta.billed_units.input_tokens,
+                                       response.meta.billed_units.output_tokens)
 
             # Set Span attributes
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -207,12 +207,12 @@ def chat(gen_ai_endpoint, version, environment, application_name, tracer,
             span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                                response.response_id)
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
-                               response.meta
+                               response.meta.billed_units.input_tokens)
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
-                               response.meta
+                               response.meta.billed_units.output_tokens)
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                               response.meta
-                               response.meta
+                               response.meta.billed_units.input_tokens +
+                               response.meta.billed_units.output_tokens)
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                cost)
             if trace_content:
@@ -241,12 +241,12 @@ def chat(gen_ai_endpoint, version, environment, application_name, tracer,
 
                 metrics["genai_requests"].add(1, attributes)
                 metrics["genai_total_tokens"].add(
-                    response.meta
-                    response.meta
+                    response.meta.billed_units.input_tokens +
+                    response.meta.billed_units.output_tokens, attributes)
                 metrics["genai_completion_tokens"].add(
-                    response.meta
+                    response.meta.billed_units.output_tokens, attributes)
                 metrics["genai_prompt_tokens"].add(
-                    response.meta
+                    response.meta.billed_units.input_tokens, attributes)
                 metrics["genai_cost"].record(cost, attributes)
 
                 # Return original response
@@ -307,8 +307,8 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
                 if event.event_type == "stream-end":
                     llmresponse = event.response.text
                     response_id = event.response.response_id
-                    prompt_tokens = event.response.meta
-                    completion_tokens = event.response.meta
+                    prompt_tokens = event.response.meta.billed_units.input_tokens
+                    completion_tokens = event.response.meta.billed_units.output_tokens
                     finish_reason = event.finish_reason
                 yield event
 
```
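These changes read token counts from `response.meta.billed_units` instead of the bare `meta` object. As a rough, hedged sketch of how such counts typically feed a per-token cost calculation — the helper name, pricing fields, and figures below are illustrative, not the SDK's actual `get_chat_model_cost`:

```python
def estimate_chat_cost(model, pricing_info, prompt_tokens, completion_tokens):
    """Illustrative cost estimate: price per 1K tokens, split by prompt vs. completion."""
    rates = pricing_info.get(model, {"promptPrice": 0.0, "completionPrice": 0.0})
    return (prompt_tokens / 1000) * rates["promptPrice"] + \
           (completion_tokens / 1000) * rates["completionPrice"]

# Hypothetical pricing table and usage, mirroring the billed_units fields in the diff above.
pricing = {"command": {"promptPrice": 0.0005, "completionPrice": 0.0015}}
print(estimate_chat_cost("command", pricing, prompt_tokens=120, completion_tokens=80))
```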
openlit/instrumentation/langchain/__init__.py
CHANGED
```diff
@@ -7,8 +7,7 @@ from wrapt import wrap_function_wrapper
 
 from openlit.instrumentation.langchain.langchain import general_wrap, hub
 
-_instruments = ("langchain >= 0.1.1",
-                "langchain-core > 0.1.1", "langchain-community >= 0.0.31")
+_instruments = ("langchain >= 0.1.1",)
 
 WRAPPED_METHODS = [
     {
```
{openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/METADATA
CHANGED
```diff
@@ -1,8 +1,8 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 0.0.1
+Version: 1.0.0
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
-Home-page: https://github.com/
+Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
 Author: OpenLIT
 Requires-Python: >=3.7.1,<4.0.0
```
```diff
@@ -18,10 +18,25 @@ Requires-Dist: opentelemetry-instrumentation (>=0.45b0,<0.46)
 Requires-Dist: opentelemetry-sdk (>=1.24.0,<2.0.0)
 Requires-Dist: requests (>=2.26.0,<3.0.0)
 Requires-Dist: tiktoken (>=0.1.1,<0.2.0)
-Project-URL: Repository, https://github.com/
+Project-URL: Repository, https://github.com/openlit/openlit/tree/main/openlit/python
 Description-Content-Type: text/markdown
 
-
+<div align="center">
+<img src="https://github.com/openlit/.github/blob/main/profile/assets/wide-logo-no-bg.png?raw=true" alt="OpenLIT Logo" width="30%"><h1>
+OpenTelemetry Auto-Instrumentation for GenAI & LLM Applications</h1>
+
+**[Documentation](https://docs.openlit.io/) | [Quickstart](#-getting-started) | [Python SDK](https://github.com/openlit/openlit/tree/main/sdk/python)**
+
+[](https://github.com/openlit/openlit)
+[](https://github.com/openlit/openlit/blob/main/LICENSE)
+[](https://pepy.tech/project/openlit)
+[](https://github.com/openlit/openlit/pulse)
+[](https://github.com/openlit/openlit/graphs/contributors)
+
+[](https://join.slack.com/t/openlit/shared_invite/zt-2etnfttwg-TjP_7BZXfYg84oAukY8QRQ)
+[](https://twitter.com/openlit_io)
+
+</div>
 
 OpenLIT Python SDK is an **OpenTelemetry-native** Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects. Designed with simplicity and efficiency, OpenLIT offers the ability to embed observability into your GenAI-driven projects effortlessly using just **a single line of code**.
 
```
````diff
@@ -63,10 +78,20 @@ This project adheres to the [Semantic Conventions](https://github.com/open-telem
 pip install openlit
 ```
 
-##
+## 🚀 Getting Started
+
+## Step 1: Install OpenLIT SDK
+
+```bash
+pip install openlit
+```
+
+### Step 2: Instrument your Application
+Integrating the OpenLIT into LLM applications is straightforward. Start monitoring for your LLM Application with just **one line of code**:
 
 ```python
 import openlit
+
 openlit.init()
 ```
 
````
````diff
@@ -74,6 +99,42 @@ By default, OpenLIT directs traces and metrics straight to your console. To forw
 
 To send telemetry to OpenTelemetry backends requiring authentication, set the `otlp_headers` parameter with its desired value. Alternatively, you can configure the endpoint by setting the `OTEL_EXPORTER_OTLP_HEADERS` environment variable as recommended in the OpenTelemetry documentation.
 
+#### Example
+
+Here is how you can send telemetry from OpenLIT to Grafana Cloud
+
+```python
+openlit.init(
+  otlp_endpoint="https://otlp-gateway-prod-us-east-0.grafana.net/otlp",
+  otlp_headers="Authorization=Basic%20<base64 encoded Instance ID and API Token>"
+)
+```
+
+Alternatively, You can also choose to set these values using `OTEL_EXPORTER_OTLP_ENDPOINT` and `OTEL_EXPORTER_OTLP_HEADERS` environment variables
+
+```python
+openlit.init()
+```
+
+```env
+export OTEL_EXPORTER_OTLP_ENDPOINT = "https://otlp-gateway-prod-us-east-0.grafana.net/otlp"
+export OTEL_EXPORTER_OTLP_HEADERS = "Authorization=Basic%20<base64 encoded Instance ID and API Token>"
+```
+
+### Step 3: Visualize and Optimize!
+With the LLM Observability data now being collected and sent to your chosen OpenTelemetry backend, the next step is to visualize and analyze this data to glean insights into your application's performance, behavior, and identify areas of improvement. Here is how you would use the data in Grafana, follow these detailed instructions to explore your LLM application's Telemetry data.
+
+- Select the **Explore** option from Grafana's sidebar.
+- At the top, ensure the correct Tempo data source is selected from the dropdown menu.
+- Use the **Query** field to specify any particular traces you are interested in, or leave it empty to browse through all the available traces.
+- You can adjust the time range to focus on specific periods of interest.
+- Hit **Run Query** to fetch your trace data. You'll see a visual representation of your traces along with detailed information on particular spans when clicked.
+
+#### Next Steps
+
+- **Create Dashboards:** Beyond just exploring traces, consider creating dashboards in Grafana to monitor key performance indicators (KPIs) and metrics over time. Dashboards can be customized with various panels to display graphs, logs, and single stats that are most relevant to your application's performance and usage patterns.
+- **Set Alerts:** Grafana also allows you to set up alerts based on specific thresholds. This feature can be invaluable in proactively managing your application's health by notifying you of potential issues before they impact users.
+- **Iterate and Optimize:** Use the insights gained from your observability data to make informed decisions on optimizing your LLM application. This might involve refining model parameters, adjusting scaling strategies, or identifying and resolving bottlenecks.
 
 
 ### Configuration
````
```diff
@@ -107,7 +168,8 @@ Your input helps us grow and improve, and we're here to support you every step o
 
 Connect with the OpenLIT community and maintainers for support, discussions, and updates:
 
-- 🌟 If you like it, Leave a star on our [GitHub](https://github.com/
+- 🌟 If you like it, Leave a star on our [GitHub](https://github.com/openlit/openlit/)
 - 🌍 Join our [Slack](https://join.slack.com/t/openlit/shared_invite/zt-2etnfttwg-TjP_7BZXfYg84oAukY8QRQ) Community for live interactions and questions.
-- 🐞 Report bugs on our [GitHub Issues](https://github.com/
+- 🐞 Report bugs on our [GitHub Issues](https://github.com/openlit/openlit/issues) to help us improve OpenLIT.
 - 𝕏 Follow us on [X](https://twitter.com/openlit) for the latest updates and news.
+
```
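Pulling the README's configuration notes together with the `init()` signature shown earlier in this diff, a typical setup might look like the sketch below. The endpoint and header values are placeholders taken from the README example, and the parameter choices are illustrative assumptions, not a recommendation from the package.

```python
import openlit

# Placeholder values; the parameter names come from the openlit.init() signature in the diff above.
openlit.init(
    environment="production",
    application_name="my-llm-app",
    otlp_endpoint="https://otlp-gateway-prod-us-east-0.grafana.net/otlp",  # example backend from the README
    otlp_headers="Authorization=Basic%20<base64 encoded Instance ID and API Token>",
    disabled_instrumentors=["chroma"],  # skip instrumentors you do not use
    trace_content=True,
    disable_metrics=False,
)
```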
{openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/RECORD
CHANGED
```diff
@@ -1,13 +1,13 @@
-openlit/__helpers.py,sha256=
-openlit/__init__.py,sha256=
+openlit/__helpers.py,sha256=EEbLEUKuCiBp0WiieAvUnGcaU5D7grFgNVDCBgMKjQE,4651
+openlit/__init__.py,sha256=E1J6QPM0BiQAbOSetZ32yaw-Ikx4ZgvPemMP9XTdxFA,8795
 openlit/instrumentation/anthropic/__init__.py,sha256=oaU53BOPyfUKbEzYvLr1DPymDluurSnwo4Hernf2XdU,1955
 openlit/instrumentation/anthropic/anthropic.py,sha256=gLN7LrgbTTOxgO8TEn-mX7WCYVGExrIGB-_ueCLPMEY,15993
 openlit/instrumentation/anthropic/async_anthropic.py,sha256=wb5U9aF3FtgPZ_1EZudsuKaB6wmOrEVwDIlfcEWnQqU,16035
 openlit/instrumentation/chroma/__init__.py,sha256=61lFpHlUEQUobsUJZHXdvOViKwsOH8AOvSfc4VgCmiM,3253
 openlit/instrumentation/chroma/chroma.py,sha256=wcY5sN-Lfdr4P56FDy8O_ft20gfxTDP12c2vIUF7Qno,10374
-openlit/instrumentation/cohere/__init__.py,sha256=
-openlit/instrumentation/cohere/cohere.py,sha256=
-openlit/instrumentation/langchain/__init__.py,sha256=
+openlit/instrumentation/cohere/__init__.py,sha256=PC5T1qIg9pwLNocBP_WjG5B_6p_z019s8quk_fNLAMs,1920
+openlit/instrumentation/cohere/cohere.py,sha256=LdRMStLs-UTxDsGsTObEv-unC8g1tRFaU2EFCUBFGHI,20348
+openlit/instrumentation/langchain/__init__.py,sha256=TW1ZR7I1i9Oig-wDWp3j1gmtQFO76jNBXQRBGGKzoOo,2531
 openlit/instrumentation/langchain/langchain.py,sha256=D-PiYlmUW5SuE4rvKRMOsMGtW5wI6aA3ldig1JhXUok,7609
 openlit/instrumentation/mistral/__init__.py,sha256=zJCIpFWRbsYrvooOJYuqwyuKeSOQLWbyXWCObL-Snks,3156
 openlit/instrumentation/mistral/async_mistral.py,sha256=nD1ns326mclIUjd4PEfrSa8mufpoZqjYF1meK9oohkA,21328
@@ -24,7 +24,7 @@ openlit/instrumentation/transformers/transformers.py,sha256=peT0BGskYt7AZ0b93TZ7
 openlit/otel/metrics.py,sha256=GdlQB1PpNvFAVbCqSTh7A87k6VVb1raHrW0y7xGSuQA,4293
 openlit/otel/tracing.py,sha256=peismkno0YPoRezHPbF5Ycz15_oOBErn_coW1CPspHg,3612
 openlit/semcov/__init__.py,sha256=n7lrz6xOwATtKF8cOWvhjr3JIrZ16sy3DXAW4Li2Q24,5686
-openlit-0.0.
-openlit-0.0.
-openlit-0.0.
-openlit-0.0.
+openlit-1.0.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.0.0.dist-info/METADATA,sha256=GlARW9aeWjuMH3mYz-AWBv39pthUN-o88CSg6WptwTk,10454
+openlit-1.0.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+openlit-1.0.0.dist-info/RECORD,,
```
{openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/LICENSE
File without changes
{openlit-0.0.1.dist-info → openlit-1.0.0.dist-info}/WHEEL
File without changes