openlit 1.18.1__py3-none-any.whl → 1.18.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/mistral/__init__.py +18 -18
- openlit/instrumentation/mistral/async_mistral.py +13 -13
- openlit/instrumentation/mistral/mistral.py +12 -12
- {openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/METADATA +3 -3
- {openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/RECORD +7 -7
- {openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/LICENSE +0 -0
- {openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/WHEEL +0 -0
openlit/instrumentation/mistral/__init__.py

@@ -9,10 +9,10 @@ from openlit.instrumentation.mistral.mistral import chat, chat_stream, embeddings
 from openlit.instrumentation.mistral.async_mistral import async_chat, async_chat_stream
 from openlit.instrumentation.mistral.async_mistral import async_embeddings
 
-_instruments = ("mistralai >=
+_instruments = ("mistralai >= 1.0.0",)
 
 class MistralInstrumentor(BaseInstrumentor):
-    """An instrumentor for
+    """An instrumentor for Mistral's client library."""
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
@@ -27,50 +27,50 @@ class MistralInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("mistralai")
 
-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.complete",
             chat("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #sync
+        # sync
        wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.stream",
             chat_stream("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.embeddings",
+            "Embeddings.create",
             embeddings("mistral.embeddings", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
         # Async
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.complete_async",
             async_chat("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #
+        # Async
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.stream_async",
             async_chat_stream("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
         #sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.embeddings",
+            "Embeddings.create_async",
             async_embeddings("mistral.embeddings", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
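These retargeted hooks follow the mistralai 1.x layout, where the sync and async entry points are methods on the `Chat` and `Embeddings` classes in the `mistralai.chat` and `mistralai.embeddings` modules (the pre-1.0 targets are truncated in this view). A minimal sketch of the same `wrapt` hooking pattern, with a hypothetical logging wrapper standing in for openlit's real handlers:

# Runnable sketch of the hooking pattern above, assuming mistralai >= 1.0.0
# is installed ("mistralai.chat" / "Chat.complete" only exist in the 1.x
# layout). `log_calls` is a hypothetical stand-in for openlit's chat handler.
from wrapt import wrap_function_wrapper

def log_calls(wrapped, instance, args, kwargs):
    # wrapped  -> the original Chat.complete method
    # instance -> the Chat object it is bound to
    print(f"Chat.complete called with model={kwargs.get('model')!r}")
    return wrapped(*args, **kwargs)

wrap_function_wrapper("mistralai.chat", "Chat.complete", log_calls)

openlit's actual handlers (`chat`, `chat_stream`, and so on) are factories that, as the diff shows, are called with endpoint and config arguments and return wrappers of this shape, opening a span and recording usage instead of printing.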
openlit/instrumentation/mistral/async_mistral.py

@@ -56,8 +56,8 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
         message_prompt = kwargs.get('messages', "")
         formatted_messages = []
         for message in message_prompt:
-            role = message
-            content = message
+            role = message["role"]
+            content = message["content"]
 
             if isinstance(content, list):
                 content_str = ", ".join(
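In the 1.x client, chat messages are passed as plain dicts, so `role` and `content` are now read by subscript rather than off the old message objects (the pre-1.0 accessors are truncated above); the identical two-line change repeats in `async_chat_stream` and in the sync `chat` and `chat_stream` hunks below. A self-contained sketch of the flattening loop on sample data, using an illustrative join format since openlit's exact one is cut off in this view:

# Sample messages in the dict shape the mistralai 1.x client accepts;
# multimodal content arrives as a list of typed parts.
messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": [{"type": "text", "text": "Hi there"}]},
]

formatted_messages = []
for message in messages:
    role = message["role"]
    content = message["content"]

    if isinstance(content, list):
        # Flatten list-valued content into one string (illustrative format).
        content_str = ", ".join(str(part.get("text", part)) for part in content)
    else:
        content_str = content
    formatted_messages.append(f"{role}: {content_str}")

print("\n".join(formatted_messages))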
@@ -207,14 +207,14 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
         llmresponse = ""
 
         # Loop through streaming events capturing relevant details
-        async for event in wrapped(*args, **kwargs):
-            response_id = event.id
-            llmresponse += event.choices[0].delta.content
-            if event.usage is not None:
-                prompt_tokens = event.usage.prompt_tokens
-                completion_tokens = event.usage.completion_tokens
-                total_tokens = event.usage.total_tokens
-                finish_reason = event.choices[0].finish_reason
+        async for event in await wrapped(*args, **kwargs):
+            response_id = event.data.id
+            llmresponse += event.data.choices[0].delta.content
+            if event.data.usage is not None:
+                prompt_tokens = event.data.usage.prompt_tokens
+                completion_tokens = event.data.usage.completion_tokens
+                total_tokens = event.data.usage.total_tokens
+                finish_reason = event.data.choices[0].finish_reason
             yield event
 
         # Handling exception ensure observability without disrupting operation
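Both the added `await` and the `.data` hops come from the 1.x streaming API: `Chat.stream_async` is a coroutine that resolves to an async iterator of events whose chunk payload sits under `.data`. A hedged sketch of consuming that stream directly, assuming the 1.x `Mistral` client entry point; the model name and environment variable are illustrative:

# Mirrors the await/.data pattern the wrapper above now uses.
import asyncio
import os
from mistralai import Mistral

async def main():
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    # stream_async must be awaited before iteration, which is exactly why
    # the wrapper changed to `async for event in await wrapped(...)`.
    stream = await client.chat.stream_async(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Say hi"}],
    )
    async for event in stream:
        chunk = event.data  # the chunk payload now lives under .data
        print(chunk.choices[0].delta.content or "", end="")
        if chunk.usage is not None:
            print(f"\ntokens: {chunk.usage.total_tokens}")

asyncio.run(main())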
@@ -223,8 +223,8 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
         message_prompt = kwargs.get('messages', "")
         formatted_messages = []
         for message in message_prompt:
-            role = message
-            content = message
+            role = message["role"]
+            content = message["content"]
 
             if isinstance(content, list):
                 content_str = ", ".join(
@@ -364,7 +364,7 @@ def async_embeddings(gen_ai_endpoint, version, environment, application_name,
 
         try:
             # Get prompt from kwargs and store as a single string
-            prompt = ', '.join(kwargs.get('
+            prompt = ', '.join(kwargs.get('inputs', []))
 
             # Calculate cost of the operation
             cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
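The 1.x embeddings call takes its texts under the `inputs` keyword (the pre-1.0 name is truncated above); the same rename appears in the sync `embeddings` hunk below. A sketch of the call shape the instrumentation now expects, assuming the 1.x `Mistral` client; texts and the environment variable are illustrative:

import os
from mistralai import Mistral

# Hypothetical inputs; the wrapper above records them as a single string
# via ', '.join(kwargs.get('inputs', [])).
texts = ["first text", "second text"]

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.embeddings.create(model="mistral-embed", inputs=texts)
print(response)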
openlit/instrumentation/mistral/mistral.py

@@ -55,8 +55,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
         message_prompt = kwargs.get('messages', "")
         formatted_messages = []
         for message in message_prompt:
-            role = message
-            content = message
+            role = message["role"]
+            content = message["content"]
 
             if isinstance(content, list):
                 content_str = ", ".join(
@@ -207,13 +207,13 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
 
         # Loop through streaming events capturing relevant details
         for event in wrapped(*args, **kwargs):
-            response_id = event.id
-            llmresponse += event.choices[0].delta.content
-            if event.usage is not None:
-                prompt_tokens = event.usage.prompt_tokens
-                completion_tokens = event.usage.completion_tokens
-                total_tokens = event.usage.total_tokens
-                finish_reason = event.usage.total_tokens if False else event.data.choices[0].finish_reason
+            response_id = event.data.id
+            llmresponse += event.data.choices[0].delta.content
+            if event.data.usage is not None:
+                prompt_tokens = event.data.usage.prompt_tokens
+                completion_tokens = event.data.usage.completion_tokens
+                total_tokens = event.data.usage.total_tokens
+                finish_reason = event.data.choices[0].finish_reason
             yield event
 
         # Handling exception ensure observability without disrupting operation
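The sync path gets the same `.data` hop but, unlike the async variant, `Chat.stream` returns a regular iterator, so the loop header above is unchanged. A short illustrative sketch, under the same assumptions as the async example:

import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
for event in client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Say hi"}],
):
    # Chunk payload sits under .data, as in the async stream.
    print(event.data.choices[0].delta.content or "", end="")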
@@ -222,8 +222,8 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
         message_prompt = kwargs.get('messages', "")
         formatted_messages = []
         for message in message_prompt:
-            role = message
-            content = message
+            role = message["role"]
+            content = message["content"]
 
             if isinstance(content, list):
                 content_str = ", ".join(
@@ -363,7 +363,7 @@ def embeddings(gen_ai_endpoint, version, environment, application_name,
 
         try:
             # Get prompt from kwargs and store as a single string
-            prompt = ', '.join(kwargs.get('
+            prompt = ', '.join(kwargs.get('inputs', []))
 
             # Calculate cost of the operation
             cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
{openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/METADATA

@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.18.1
-Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
+Version: 1.18.2
+Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
-Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
+Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
 Author: OpenLIT
 Requires-Python: >=3.7.1,<4.0.0
 Classifier: Programming Language :: Python :: 3
{openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/RECORD

@@ -28,9 +28,9 @@ openlit/instrumentation/llamaindex/__init__.py,sha256=vPtK65G6b-TwJERowVRUVl7f_n
 openlit/instrumentation/llamaindex/llamaindex.py,sha256=uiIigbwhonSbJWA7LpgOVI1R4kxxPODS1K5wyHIQ4hM,4048
 openlit/instrumentation/milvus/__init__.py,sha256=qi1yfmMrvkDtnrN_6toW8qC9BRL78bq7ayWpObJ8Bq4,2961
 openlit/instrumentation/milvus/milvus.py,sha256=qhKIoggBAJhRctRrBYz69AcvXH-eh7oBn_l9WfxpAjI,9121
-openlit/instrumentation/mistral/__init__.py,sha256=
-openlit/instrumentation/mistral/async_mistral.py,sha256=
-openlit/instrumentation/mistral/mistral.py,sha256=
+openlit/instrumentation/mistral/__init__.py,sha256=niWn0gYNOTPS5zoTjtCciDqQVj-iJehnpdh7ElB-H9w,3088
+openlit/instrumentation/mistral/async_mistral.py,sha256=l-kcaGPrX3sqPH-RXWo6ope0Ui3nUvExNJ4KX9QgDMY,22246
+openlit/instrumentation/mistral/mistral.py,sha256=Q7MMRvVFsM8o0_ebZ0EfnhGjs16SJSnmu-oE798gYMQ,22087
 openlit/instrumentation/ollama/__init__.py,sha256=cOax8PiypDuo_FC4WvDCYBRo7lH5nV9xU92h7k-eZbg,3812
 openlit/instrumentation/ollama/async_ollama.py,sha256=7lbikD-I9k8VL63idqj3VMEfiEKJmFNUPR8Xb6g2phQ,31366
 openlit/instrumentation/ollama/ollama.py,sha256=lBt1d3rFnF1tFbfdOccwjEafHnmTAUGsiOKSHku6Fkw,31277
@@ -53,7 +53,7 @@ openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOs
 openlit/otel/metrics.py,sha256=O7NoaDz0bY19mqpE4-0PcKwEe-B-iJFRgOCaanAuZAc,4291
 openlit/otel/tracing.py,sha256=vL1ifMbARPBpqK--yXYsCM6y5dSu5LFIKqkhZXtYmUc,3712
 openlit/semcov/__init__.py,sha256=EvoNOKtc7UKwLZ3Gp0-B1zwmeTcAIbx8O7wvAw8wXP4,7498
-openlit-1.18.
-openlit-1.18.
-openlit-1.18.
-openlit-1.18.
+openlit-1.18.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.18.2.dist-info/METADATA,sha256=eD-PsH7RbUA7EOICMoUHzO_f1g_Sa0b6UcNvHnBwY-8,14347
+openlit-1.18.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+openlit-1.18.2.dist-info/RECORD,,
{openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/LICENSE: file without changes
{openlit-1.18.1.dist-info → openlit-1.18.2.dist-info}/WHEEL: file without changes