openlit 1.1.2__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__init__.py +4 -2
- openlit/instrumentation/vertexai/__init__.py +147 -0
- openlit/instrumentation/vertexai/async_vertexai.py +1047 -0
- openlit/instrumentation/vertexai/vertexai.py +1047 -0
- openlit/semcov/__init__.py +1 -0
- {openlit-1.1.2.dist-info → openlit-1.2.0.dist-info}/METADATA +1 -1
- {openlit-1.1.2.dist-info → openlit-1.2.0.dist-info}/RECORD +9 -6
- {openlit-1.1.2.dist-info → openlit-1.2.0.dist-info}/LICENSE +0 -0
- {openlit-1.1.2.dist-info → openlit-1.2.0.dist-info}/WHEEL +0 -0
openlit/__init__.py
CHANGED
@@ -18,6 +18,7 @@ from openlit.instrumentation.anthropic import AnthropicInstrumentor
 from openlit.instrumentation.cohere import CohereInstrumentor
 from openlit.instrumentation.mistral import MistralInstrumentor
 from openlit.instrumentation.bedrock import BedrockInstrumentor
+from openlit.instrumentation.vertexai import VertexAIInstrumentor
 from openlit.instrumentation.langchain import LangChainInstrumentor
 from openlit.instrumentation.chroma import ChromaInstrumentor
 from openlit.instrumentation.pinecone import PineconeInstrumentor
@@ -139,7 +140,6 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         disable_metrics (bool): Flag to disable metrics (Optional)
     """
     disabled_instrumentors = disabled_instrumentors if disabled_instrumentors else []
-
     # Check for invalid instrumentor names
 
     module_name_map = {
@@ -148,9 +148,10 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "cohere": "cohere",
         "mistral": "mistralai",
         "bedrock": "boto3",
+        "vertexai": "vertexai",
         "langchain": "langchain",
         "chroma": "chromadb",
-        "pinecone": "pinecone
+        "pinecone": "pinecone",
         "transformers": "transformers"
     }
 
@@ -195,6 +196,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "cohere": CohereInstrumentor(),
         "mistral": MistralInstrumentor(),
         "bedrock": BedrockInstrumentor(),
+        "vertexai": VertexAIInstrumentor(),
         "langchain": LangChainInstrumentor(),
         "chroma": ChromaInstrumentor(),
         "pinecone": PineconeInstrumentor(),
openlit/instrumentation/vertexai/__init__.py
ADDED
@@ -0,0 +1,147 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of VertexAI Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.vertexai.vertexai import (
+    generate_content, predict, predict_streaming,
+    send_message, start_chat, start_chat_streaming,
+    embeddings
+)
+from openlit.instrumentation.vertexai.async_vertexai import (
+    generate_content_async, predict_async,
+    predict_streaming_async,
+    send_message_async,
+    start_chat_async, start_chat_streaming_async,
+    embeddings_async
+)
+
+
+_instruments = ("google-cloud-aiplatform >= 1.38.1",)
+
+class VertexAIInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for VertexAI's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("google-cloud-aiplatform")
+
+        # sync
+        wrap_function_wrapper(
+            "vertexai.generative_models",
+            "GenerativeModel.generate_content",
+            generate_content("vertexai.generate_content", version, environment, application_name,
+                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.generative_models",
+            "ChatSession.send_message",
+            send_message("vertexai.send_message", version, environment, application_name,
+                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextGenerationModel.predict",
+            predict("vertexai.predict", version, environment, application_name,
+                    tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextGenerationModel.predict_streaming",
+            predict_streaming("vertexai.predict", version, environment, application_name,
+                              tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "ChatSession.send_message",
+            start_chat("vertexai.send_message", version, environment, application_name,
+                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "ChatSession.send_message_streaming",
+            start_chat_streaming("vertexai.send_message", version, environment, application_name,
+                                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextEmbeddingModel.get_embeddings",
+            embeddings("vertexai.get_embeddings", version, environment, application_name,
+                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        # async
+        wrap_function_wrapper(
+            "vertexai.generative_models",
+            "GenerativeModel.generate_content_async",
+            generate_content_async("vertexai.generate_content", version, environment,
+                                   application_name, tracer, pricing_info, trace_content,
+                                   metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.generative_models",
+            "ChatSession.send_message_async",
+            send_message_async("vertexai.send_message", version, environment, application_name,
+                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextGenerationModel.predict_async",
+            predict_async("vertexai.predict", version, environment, application_name,
+                          tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextGenerationModel.predict_streaming_async",
+            predict_streaming_async("vertexai.predict", version, environment, application_name,
+                                    tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "ChatSession.send_message_async",
+            start_chat_async("vertexai.send_message", version, environment, application_name,
+                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "ChatSession.send_message_streaming_async",
+            start_chat_streaming_async("vertexai.send_message", version, environment,
+                                       application_name, tracer, pricing_info, trace_content,
+                                       metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "vertexai.language_models",
+            "TextEmbeddingModel.get_embeddings_async",
+            embeddings_async("vertexai.get_embeddings", version, environment, application_name,
+                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass