openlit 1.34.16.tar.gz → 1.34.17.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. {openlit-1.34.16 → openlit-1.34.17}/PKG-INFO +1 -1
  2. {openlit-1.34.16 → openlit-1.34.17}/pyproject.toml +1 -1
  3. openlit-1.34.17/src/openlit/instrumentation/mistral/__init__.py +88 -0
  4. openlit-1.34.17/src/openlit/instrumentation/mistral/async_mistral.py +200 -0
  5. openlit-1.34.17/src/openlit/instrumentation/mistral/mistral.py +200 -0
  6. openlit-1.34.17/src/openlit/instrumentation/mistral/utils.py +298 -0
  7. openlit-1.34.16/src/openlit/instrumentation/mistral/__init__.py +0 -80
  8. openlit-1.34.16/src/openlit/instrumentation/mistral/async_mistral.py +0 -611
  9. openlit-1.34.16/src/openlit/instrumentation/mistral/mistral.py +0 -611
  10. {openlit-1.34.16 → openlit-1.34.17}/LICENSE +0 -0
  11. {openlit-1.34.16 → openlit-1.34.17}/README.md +0 -0
  12. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/__helpers.py +0 -0
  13. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/__init__.py +0 -0
  14. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/__init__.py +0 -0
  15. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/all.py +0 -0
  16. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/bias_detection.py +0 -0
  17. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/hallucination.py +0 -0
  18. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/toxicity.py +0 -0
  19. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/evals/utils.py +0 -0
  20. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/__init__.py +0 -0
  21. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/all.py +0 -0
  22. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/prompt_injection.py +0 -0
  23. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/restrict_topic.py +0 -0
  24. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/sensitive_topic.py +0 -0
  25. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/guard/utils.py +0 -0
  26. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ag2/__init__.py +0 -0
  27. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ag2/ag2.py +0 -0
  28. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ai21/__init__.py +0 -0
  29. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ai21/ai21.py +0 -0
  30. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
  31. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ai21/utils.py +0 -0
  32. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  33. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  34. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  35. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/anthropic/utils.py +0 -0
  36. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
  37. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
  38. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/assemblyai/utils.py +0 -0
  39. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/astra/__init__.py +0 -0
  40. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/astra/astra.py +0 -0
  41. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/astra/async_astra.py +0 -0
  42. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/astra/utils.py +0 -0
  43. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  44. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
  45. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
  46. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
  47. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  48. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  49. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/bedrock/utils.py +0 -0
  50. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  51. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  52. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  53. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
  54. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  55. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
  56. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
  57. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
  58. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
  59. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
  60. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  61. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/crewai/crewai.py +0 -0
  62. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
  63. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
  64. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  65. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  66. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  67. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/elevenlabs/utils.py +0 -0
  68. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  69. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  70. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
  71. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
  72. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  73. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
  74. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
  75. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
  76. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  77. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  78. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/gpt4all/utils.py +0 -0
  79. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  80. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/groq/__init__.py +0 -0
  81. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  82. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/groq/groq.py +0 -0
  83. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/groq/utils.py +0 -0
  84. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  85. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  86. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/julep/__init__.py +0 -0
  87. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/julep/async_julep.py +0 -0
  88. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/julep/julep.py +0 -0
  89. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  90. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
  91. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  92. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/letta/__init__.py +0 -0
  93. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/letta/letta.py +0 -0
  94. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  95. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
  96. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/litellm/litellm.py +0 -0
  97. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/litellm/utils.py +0 -0
  98. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  99. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  100. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/mem0/__init__.py +0 -0
  101. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/mem0/mem0.py +0 -0
  102. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  103. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  104. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/multion/__init__.py +0 -0
  105. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/multion/async_multion.py +0 -0
  106. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/multion/multion.py +0 -0
  107. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  108. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  109. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  110. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/ollama/utils.py +0 -0
  111. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/openai/__init__.py +0 -0
  112. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  113. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/openai/openai.py +0 -0
  114. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
  115. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
  116. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/phidata/__init__.py +0 -0
  117. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/phidata/phidata.py +0 -0
  118. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  119. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  120. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/premai/__init__.py +0 -0
  121. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/premai/premai.py +0 -0
  122. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/premai/utils.py +0 -0
  123. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
  124. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
  125. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
  126. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  127. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
  128. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  129. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/reka/__init__.py +0 -0
  130. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/reka/async_reka.py +0 -0
  131. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/reka/reka.py +0 -0
  132. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/reka/utils.py +0 -0
  133. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/together/__init__.py +0 -0
  134. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/together/async_together.py +0 -0
  135. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/together/together.py +0 -0
  136. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/together/utils.py +0 -0
  137. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  138. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  139. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/transformers/utils.py +0 -0
  140. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  141. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  142. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  143. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  144. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vllm/utils.py +0 -0
  145. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  146. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/otel/events.py +0 -0
  147. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/otel/metrics.py +0 -0
  148. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/otel/tracing.py +0 -0
  149. {openlit-1.34.16 → openlit-1.34.17}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.34.16 → openlit-1.34.17}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: openlit
- Version: 1.34.16
+ Version: 1.34.17
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
  License: Apache-2.0
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.16 → openlit-1.34.17}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "openlit"
- version = "1.34.16"
+ version = "1.34.17"
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
  authors = ["OpenLIT"]
  license = "Apache-2.0"
openlit-1.34.17/src/openlit/instrumentation/mistral/__init__.py
@@ -0,0 +1,88 @@
+ """Initializer of Auto Instrumentation of Mistral Functions"""
+
+ from typing import Collection
+ import importlib.metadata
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from wrapt import wrap_function_wrapper
+
+ from openlit.instrumentation.mistral.mistral import (
+     complete,
+     stream,
+     embed
+ )
+ from openlit.instrumentation.mistral.async_mistral import (
+     async_complete,
+     async_stream,
+     async_embed
+ )
+
+ _instruments = ("mistralai >= 1.0.0",)
+
+ class MistralInstrumentor(BaseInstrumentor):
+     """
+     An instrumentor for Mistral client library.
+     """
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         application_name = kwargs.get("application_name", "default")
+         environment = kwargs.get("environment", "default")
+         tracer = kwargs.get("tracer")
+         metrics = kwargs.get("metrics_dict")
+         pricing_info = kwargs.get("pricing_info", {})
+         capture_message_content = kwargs.get("capture_message_content", False)
+         disable_metrics = kwargs.get("disable_metrics")
+         version = importlib.metadata.version("mistralai")
+
+         # sync chat completions
+         wrap_function_wrapper(
+             "mistralai.chat",
+             "Chat.complete",
+             complete(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # sync chat streaming
+         wrap_function_wrapper(
+             "mistralai.chat",
+             "Chat.stream",
+             stream(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # sync embeddings
+         wrap_function_wrapper(
+             "mistralai.embeddings",
+             "Embeddings.create",
+             embed(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # async chat completions
+         wrap_function_wrapper(
+             "mistralai.chat",
+             "Chat.complete_async",
+             async_complete(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # async chat streaming
+         wrap_function_wrapper(
+             "mistralai.chat",
+             "Chat.stream_async",
+             async_stream(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # async embeddings
+         wrap_function_wrapper(
+             "mistralai.embeddings",
+             "Embeddings.create_async",
+             async_embed(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+     def _uninstrument(self, **kwargs):
+         pass
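
Example (not part of the diff): the new __init__.py wires sync and async wrappers onto Chat.complete, Chat.stream, Chat.complete_async, Chat.stream_async, and the two Embeddings entry points via wrapt. The sketch below shows how this instrumentation is typically exercised; it assumes the standard openlit.init() entry point and the mistralai 1.x client, and the application/environment names and the exact span name are illustrative rather than authoritative.

# Illustrative usage sketch (assumptions noted above; not taken from this diff).
import openlit
from mistralai import Mistral

openlit.init(application_name="demo-app", environment="dev")  # hypothetical names

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key
# Chat.complete is now wrapped by complete(...) from mistral.py, so this call
# should be recorded as a CLIENT span (e.g. "chat mistral-small-latest").
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
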
openlit-1.34.17/src/openlit/instrumentation/mistral/async_mistral.py
@@ -0,0 +1,200 @@
+ """
+ Module for monitoring Mistral API calls (async version).
+ """
+
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port,
+ )
+ from openlit.instrumentation.mistral.utils import (
+     process_chunk,
+     process_chat_response,
+     process_streaming_chat_response,
+     process_embedding_response,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def async_complete(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI complete function call
+     """
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI complete function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-small-latest")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+             response = process_chat_response(
+                 response=response,
+                 request_model=request_model,
+                 pricing_info=pricing_info,
+                 server_port=server_port,
+                 server_address=server_address,
+                 environment=environment,
+                 application_name=application_name,
+                 metrics=metrics,
+                 start_time=start_time,
+                 span=span,
+                 capture_message_content=capture_message_content,
+                 disable_metrics=disable_metrics,
+                 version=version,
+                 **kwargs
+             )
+
+             return response
+
+     return wrapper
+
+ def async_stream(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI stream function call
+     """
+
+     class TracedAsyncStream:
+         """
+         Wrapper for async streaming responses to collect telemetry.
+         """
+
+         def __init__(
+                 self,
+                 wrapped,
+                 span,
+                 span_name,
+                 kwargs,
+                 server_address,
+                 server_port,
+                 **args,
+             ):
+             self.__wrapped__ = wrapped
+             self._span = span
+             self._span_name = span_name
+             self._llmresponse = ""
+             self._response_id = ""
+             self._response_model = ""
+             self._finish_reason = ""
+             self._tools = None
+             self._input_tokens = 0
+             self._output_tokens = 0
+
+             self._args = args
+             self._kwargs = kwargs
+             self._start_time = time.time()
+             self._end_time = None
+             self._timestamps = []
+             self._ttft = 0
+             self._tbt = 0
+             self._server_address = server_address
+             self._server_port = server_port
+
+         async def __aenter__(self):
+             await self.__wrapped__.__aenter__()
+             return self
+
+         async def __aexit__(self, exc_type, exc_value, traceback):
+             await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
+
+         def __aiter__(self):
+             return self
+
+         async def __getattr__(self, name):
+             """Delegate attribute access to the wrapped object."""
+             return getattr(await self.__wrapped__, name)
+
+         async def __anext__(self):
+             try:
+                 chunk = await self.__wrapped__.__anext__()
+                 process_chunk(self, chunk)
+                 return chunk
+             except StopAsyncIteration:
+                 try:
+                     with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version
+                         )
+
+                 except Exception as e:
+                     handle_exception(self._span, e)
+
+                 raise
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI stream function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-small-latest")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         # Stream endpoint is always streaming
+         awaited_wrapped = await wrapped(*args, **kwargs)
+         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+
+         return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+     return wrapper
+
+ def async_embed(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI embedding function call
+     """
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI embedding function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-embed")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+
+             try:
+                 response = process_embedding_response(
+                     response=response,
+                     request_model=request_model,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version,
+                     **kwargs
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
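
Note (not part of the diff): async_stream wraps the awaited stream object in TracedAsyncStream, which forwards iteration to the underlying stream, feeds every chunk through process_chunk, and only records the span once the stream is exhausted. The reduced sketch below, with hypothetical names and the telemetry calls replaced by a counter and a print, shows that proxy pattern in isolation.

# Reduced, self-contained illustration of the TracedAsyncStream proxy pattern
# (hypothetical names; not taken from the package).
import asyncio

class CountingAsyncProxy:
    def __init__(self, wrapped):
        self.__wrapped__ = wrapped
        self._chunks = 0

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            chunk = await self.__wrapped__.__anext__()
            self._chunks += 1  # stands in for process_chunk(self, chunk)
            return chunk
        except StopAsyncIteration:
            # stands in for opening the span and calling process_streaming_chat_response
            print(f"stream finished after {self._chunks} chunks")
            raise

async def fake_stream():
    for i in range(3):
        yield {"delta": i}

async def main():
    async for chunk in CountingAsyncProxy(fake_stream()):
        pass

asyncio.run(main())
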
openlit-1.34.17/src/openlit/instrumentation/mistral/mistral.py
@@ -0,0 +1,200 @@
+ """
+ Module for monitoring Mistral API calls.
+ """
+
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port,
+ )
+ from openlit.instrumentation.mistral.utils import (
+     process_chunk,
+     process_chat_response,
+     process_streaming_chat_response,
+     process_embedding_response,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def complete(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI complete function call
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI complete function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-small-latest")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+             response = process_chat_response(
+                 response=response,
+                 request_model=request_model,
+                 pricing_info=pricing_info,
+                 server_port=server_port,
+                 server_address=server_address,
+                 environment=environment,
+                 application_name=application_name,
+                 metrics=metrics,
+                 start_time=start_time,
+                 span=span,
+                 capture_message_content=capture_message_content,
+                 disable_metrics=disable_metrics,
+                 version=version,
+                 **kwargs
+             )
+
+             return response
+
+     return wrapper
+
+ def stream(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI stream function call
+     """
+
+     class TracedSyncStream:
+         """
+         Wrapper for streaming responses to collect telemetry.
+         """
+
+         def __init__(
+                 self,
+                 wrapped,
+                 span,
+                 span_name,
+                 kwargs,
+                 server_address,
+                 server_port,
+                 **args,
+             ):
+             self.__wrapped__ = wrapped
+             self._span = span
+             self._span_name = span_name
+             self._llmresponse = ""
+             self._response_id = ""
+             self._response_model = ""
+             self._finish_reason = ""
+             self._tools = None
+             self._input_tokens = 0
+             self._output_tokens = 0
+
+             self._args = args
+             self._kwargs = kwargs
+             self._start_time = time.time()
+             self._end_time = None
+             self._timestamps = []
+             self._ttft = 0
+             self._tbt = 0
+             self._server_address = server_address
+             self._server_port = server_port
+
+         def __enter__(self):
+             self.__wrapped__.__enter__()
+             return self
+
+         def __exit__(self, exc_type, exc_value, traceback):
+             self.__wrapped__.__exit__(exc_type, exc_value, traceback)
+
+         def __iter__(self):
+             return self
+
+         def __getattr__(self, name):
+             """Delegate attribute access to the wrapped object."""
+             return getattr(self.__wrapped__, name)
+
+         def __next__(self):
+             try:
+                 chunk = self.__wrapped__.__next__()
+                 process_chunk(self, chunk)
+                 return chunk
+             except StopIteration:
+                 try:
+                     with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version
+                         )
+
+                 except Exception as e:
+                     handle_exception(self._span, e)
+
+                 raise
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI stream function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-small-latest")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         # Stream endpoint is always streaming
+         awaited_wrapped = wrapped(*args, **kwargs)
+         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+
+         return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+     return wrapper
+
+ def embed(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI embedding function call
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI embedding function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+         request_model = kwargs.get("model", "mistral-embed")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+
+             try:
+                 response = process_embedding_response(
+                     response=response,
+                     request_model=request_model,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version,
+                     **kwargs
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
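
Note (not part of the diff): because the stream wrapper returns a TracedSyncStream that forwards __enter__, __exit__, __iter__, and attribute access to the real stream, existing calling code should keep working unchanged. A hedged sketch of the caller-side view, assuming the mistralai 1.x Chat.stream API, could look like this:

# Hypothetical caller-side sketch (assumption, not taken from the diff):
# iteration is proxied through TracedSyncStream.
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key
stream = client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Stream a short haiku"}],
)
for event in stream:
    # each chunk passes through process_chunk(...) before reaching this loop
    pass
# When the stream is exhausted, TracedSyncStream opens a span and calls
# process_streaming_chat_response(...) to record token counts and the
# timing attributes (_ttft, _tbt) it accumulated while iterating.
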