openlit 1.20.0__tar.gz → 1.22.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. {openlit-1.20.0 → openlit-1.22.0}/PKG-INFO +20 -18
  2. {openlit-1.20.0 → openlit-1.22.0}/README.md +19 -17
  3. {openlit-1.20.0 → openlit-1.22.0}/pyproject.toml +1 -1
  4. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/__init__.py +3 -0
  5. openlit-1.22.0/src/openlit/instrumentation/azure_ai_inference/__init__.py +71 -0
  6. openlit-1.22.0/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +432 -0
  7. openlit-1.22.0/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +432 -0
  8. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/google_ai_studio/__init__.py +1 -1
  9. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +11 -7
  10. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +11 -7
  11. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/semcov/__init__.py +1 -0
  12. {openlit-1.20.0 → openlit-1.22.0}/LICENSE +0 -0
  13. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/__helpers.py +0 -0
  14. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  15. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  16. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  17. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  18. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  19. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  20. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  21. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  22. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  23. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  24. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  25. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  26. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  27. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  28. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  29. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  30. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  31. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/groq/__init__.py +0 -0
  32. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  33. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/groq/groq.py +0 -0
  34. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  35. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  36. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  37. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  38. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  39. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  40. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  41. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  42. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  43. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  44. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  45. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  46. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  47. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  48. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/openai/__init__.py +0 -0
  49. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
  50. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  51. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
  52. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/openai/openai.py +0 -0
  53. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  54. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  55. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  56. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  57. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  58. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  59. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  60. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  61. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  62. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  63. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  64. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/otel/metrics.py +0 -0
  65. {openlit-1.20.0 → openlit-1.22.0}/src/openlit/otel/tracing.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.20.0
+Version: 1.22.0
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -54,23 +54,25 @@ This project adheres to the [Semantic Conventions](https://github.com/open-telem
 
 ## Auto Instrumentation Capabilities
 
-| LLMs | Vector DBs | Frameworks | GPUs |
-|-----------------------------------------------------------------|----------------------------------------------|----------------------------------------------|---------------|
-| [✅ OpenAI](https://docs.openlit.io/latest/integrations/openai) | [✅ ChromaDB](https://docs.openlit.io/latest/integrations/chromadb) | [✅ Langchain](https://docs.openlit.io/latest/integrations/langchain) | [✅ NVIDIA GPUs](https://docs.openlit.io/latest/integrations/nvidia-gpu) |
-| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | |
-| [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
-| [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
-| [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
-| [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
-| [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | | |
-| [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
-| [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
-| [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
-| [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
-| [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
-| [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
-| [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | | |
-| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
+| LLMs | Vector DBs | Frameworks | GPUs |
+|--------------------------------------------------------------------------|----------------------------------------------|----------------------------------------------|---------------|
+| [✅ OpenAI](https://docs.openlit.io/latest/integrations/openai) | [✅ ChromaDB](https://docs.openlit.io/latest/integrations/chromadb) | [✅ Langchain](https://docs.openlit.io/latest/integrations/langchain) | [✅ NVIDIA GPUs](https://docs.openlit.io/latest/integrations/nvidia-gpu) |
+| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | |
+| [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
+| [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
+| [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
+| [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
+| [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | | |
+| [✅ Azure AI Inference](https://docs.openlit.io/latest/integrations/azure-ai-inference) | | | |
+| [✅ GitHub AI Models](https://docs.openlit.io/latest/integrations/github-models) | | | |
+| [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
+| [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
+| [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
+| [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
+| [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
+| [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
+| [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | | |
+| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
 
 ## Supported Destinations
 - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
README.md

@@ -27,23 +27,25 @@ This project adheres to the [Semantic Conventions](https://github.com/open-telem
 
 ## Auto Instrumentation Capabilities
 
-| LLMs | Vector DBs | Frameworks | GPUs |
-|-----------------------------------------------------------------|----------------------------------------------|----------------------------------------------|---------------|
-| [✅ OpenAI](https://docs.openlit.io/latest/integrations/openai) | [✅ ChromaDB](https://docs.openlit.io/latest/integrations/chromadb) | [✅ Langchain](https://docs.openlit.io/latest/integrations/langchain) | [✅ NVIDIA GPUs](https://docs.openlit.io/latest/integrations/nvidia-gpu) |
-| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | |
-| [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
-| [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
-| [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
-| [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
-| [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | | |
-| [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
-| [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
-| [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
-| [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
-| [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
-| [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
-| [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | | |
-| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
+| LLMs | Vector DBs | Frameworks | GPUs |
+|--------------------------------------------------------------------------|----------------------------------------------|----------------------------------------------|---------------|
+| [✅ OpenAI](https://docs.openlit.io/latest/integrations/openai) | [✅ ChromaDB](https://docs.openlit.io/latest/integrations/chromadb) | [✅ Langchain](https://docs.openlit.io/latest/integrations/langchain) | [✅ NVIDIA GPUs](https://docs.openlit.io/latest/integrations/nvidia-gpu) |
+| [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | |
+| [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
+| [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
+| [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
+| [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
+| [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | | |
+| [✅ Azure AI Inference](https://docs.openlit.io/latest/integrations/azure-ai-inference) | | | |
+| [✅ GitHub AI Models](https://docs.openlit.io/latest/integrations/github-models) | | | |
+| [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
+| [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
+| [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
+| [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
+| [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
+| [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
+| [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | | |
+| [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
 
 ## Supported Destinations
 - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.20.0"
+version = "1.22.0"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
src/openlit/__init__.py

@@ -34,6 +34,7 @@ from openlit.instrumentation.gpt4all import GPT4AllInstrumentor
 from openlit.instrumentation.elevenlabs import ElevenLabsInstrumentor
 from openlit.instrumentation.vllm import VLLMInstrumentor
 from openlit.instrumentation.google_ai_studio import GoogleAIStudioInstrumentor
+from openlit.instrumentation.azure_ai_inference import AzureAIInferenceInstrumentor
 from openlit.instrumentation.langchain import LangChainInstrumentor
 from openlit.instrumentation.llamaindex import LlamaIndexInstrumentor
 from openlit.instrumentation.haystack import HaystackInstrumentor
@@ -198,6 +199,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "elevenlabs": "elevenlabs",
         "vllm": "vllm",
         "google-ai-studio": "google.generativeai",
+        "azure-ai-inference": "azure.ai.inference",
         "langchain": "langchain",
         "llama_index": "llama_index",
         "haystack": "haystack",
@@ -276,6 +278,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "elevenlabs": ElevenLabsInstrumentor(),
         "vllm": VLLMInstrumentor(),
         "google-ai-studio": GoogleAIStudioInstrumentor(),
+        "azure-ai-inference": AzureAIInferenceInstrumentor(),
         "langchain": LangChainInstrumentor(),
         "llama_index": LlamaIndexInstrumentor(),
         "haystack": HaystackInstrumentor(),
src/openlit/instrumentation/azure_ai_inference/__init__.py (new file)

@@ -0,0 +1,71 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of Azure AI Inference Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.azure_ai_inference.azure_ai_inference import (
+    complete, embedding
+)
+
+from openlit.instrumentation.azure_ai_inference.async_azure_ai_inference import (
+    async_complete, async_embedding
+)
+
+_instruments = ("azure-ai-inference >= 1.0.0b4",)
+
+class AzureAIInferenceInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for azure-ai-inference's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("azure-ai-inference")
+
+        # sync generate
+        wrap_function_wrapper(
+            "azure.ai.inference",
+            "ChatCompletionsClient.complete",
+            complete("azure_ai.complete", version, environment, application_name,
+                     tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        # sync embedding
+        wrap_function_wrapper(
+            "azure.ai.inference",
+            "EmbeddingsClient.embed",
+            embedding("azure_ai.embed", version, environment, application_name,
+                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        # async generate
+        wrap_function_wrapper(
+            "azure.ai.inference.aio",
+            "ChatCompletionsClient.complete",
+            async_complete("azure_ai.complete", version, environment, application_name,
+                           tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+        # async embedding
+        wrap_function_wrapper(
+            "azure.ai.inference.aio",
+            "EmbeddingsClient.embed",
+            async_embedding("azure_ai.embed", version, environment, application_name,
+                            tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
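The wrapper factories `complete`, `embedding`, `async_complete`, and `async_embedding` live in `azure_ai_inference.py` and `async_azure_ai_inference.py`, which are listed above (+432 lines each) but not shown in this section. For orientation only, here is a hedged sketch of the shape such a factory must take to satisfy `wrap_function_wrapper`: it captures the configuration passed in from `_instrument` and returns a wrapt-style `wrapper(wrapped, instance, args, kwargs)`. The span attributes and error handling below are illustrative assumptions, not the shipped implementation.

```python
# Illustrative sketch of a wrapt-compatible wrapper factory for
# ChatCompletionsClient.complete; not the code shipped in openlit 1.22.0.
from opentelemetry.trace import SpanKind, Status, StatusCode

def complete(gen_ai_endpoint, version, environment, application_name,
             tracer, pricing_info, trace_content, metrics, disable_metrics):
    """Return a wrapper that traces ChatCompletionsClient.complete (sketch)."""

    def wrapper(wrapped, instance, args, kwargs):
        # Span name mirrors the label passed by the instrumentor, e.g. "azure_ai.complete".
        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = wrapped(*args, **kwargs)  # call the original SDK method
            span.set_attribute("gen_ai.request.model", kwargs.get("model", ""))
            if trace_content:
                # Prompt/completion capture would be recorded here, gated by trace_content.
                pass
            span.set_status(Status(StatusCode.OK))
            return response

    return wrapper
```

wrapt patches `ChatCompletionsClient.complete` in place with this wrapper, so application code keeps calling the SDK exactly as before and the original method still runs via `wrapped(*args, **kwargs)`.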