openlit 1.33.9__py3-none-any.whl → 1.33.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. openlit/__helpers.py +78 -0
  2. openlit/__init__.py +41 -13
  3. openlit/instrumentation/ag2/__init__.py +9 -10
  4. openlit/instrumentation/ag2/ag2.py +134 -69
  5. openlit/instrumentation/ai21/__init__.py +6 -5
  6. openlit/instrumentation/ai21/ai21.py +71 -534
  7. openlit/instrumentation/ai21/async_ai21.py +71 -534
  8. openlit/instrumentation/ai21/utils.py +407 -0
  9. openlit/instrumentation/anthropic/__init__.py +3 -3
  10. openlit/instrumentation/anthropic/anthropic.py +5 -5
  11. openlit/instrumentation/anthropic/async_anthropic.py +5 -5
  12. openlit/instrumentation/assemblyai/__init__.py +2 -2
  13. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  14. openlit/instrumentation/astra/__init__.py +25 -25
  15. openlit/instrumentation/astra/astra.py +7 -7
  16. openlit/instrumentation/astra/async_astra.py +7 -7
  17. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  18. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +11 -11
  19. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +11 -11
  20. openlit/instrumentation/bedrock/__init__.py +2 -2
  21. openlit/instrumentation/bedrock/bedrock.py +3 -3
  22. openlit/instrumentation/chroma/__init__.py +9 -9
  23. openlit/instrumentation/chroma/chroma.py +7 -7
  24. openlit/instrumentation/cohere/__init__.py +7 -7
  25. openlit/instrumentation/cohere/async_cohere.py +10 -10
  26. openlit/instrumentation/cohere/cohere.py +11 -11
  27. openlit/instrumentation/controlflow/__init__.py +4 -4
  28. openlit/instrumentation/controlflow/controlflow.py +5 -5
  29. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  30. openlit/instrumentation/crawl4ai/async_crawl4ai.py +5 -5
  31. openlit/instrumentation/crawl4ai/crawl4ai.py +5 -5
  32. openlit/instrumentation/crewai/__init__.py +3 -3
  33. openlit/instrumentation/crewai/crewai.py +6 -4
  34. openlit/instrumentation/dynamiq/__init__.py +5 -5
  35. openlit/instrumentation/dynamiq/dynamiq.py +5 -5
  36. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  37. openlit/instrumentation/elevenlabs/async_elevenlabs.py +4 -5
  38. openlit/instrumentation/elevenlabs/elevenlabs.py +4 -5
  39. openlit/instrumentation/embedchain/__init__.py +2 -2
  40. openlit/instrumentation/embedchain/embedchain.py +9 -9
  41. openlit/instrumentation/firecrawl/__init__.py +3 -3
  42. openlit/instrumentation/firecrawl/firecrawl.py +5 -5
  43. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  44. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  45. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  46. openlit/instrumentation/gpt4all/__init__.py +5 -5
  47. openlit/instrumentation/gpt4all/gpt4all.py +350 -225
  48. openlit/instrumentation/gpu/__init__.py +5 -5
  49. openlit/instrumentation/groq/__init__.py +5 -5
  50. openlit/instrumentation/groq/async_groq.py +359 -243
  51. openlit/instrumentation/groq/groq.py +359 -243
  52. openlit/instrumentation/haystack/__init__.py +2 -2
  53. openlit/instrumentation/haystack/haystack.py +5 -5
  54. openlit/instrumentation/julep/__init__.py +7 -7
  55. openlit/instrumentation/julep/async_julep.py +6 -6
  56. openlit/instrumentation/julep/julep.py +6 -6
  57. openlit/instrumentation/langchain/__init__.py +15 -9
  58. openlit/instrumentation/langchain/async_langchain.py +388 -0
  59. openlit/instrumentation/langchain/langchain.py +110 -497
  60. openlit/instrumentation/letta/__init__.py +7 -7
  61. openlit/instrumentation/letta/letta.py +10 -8
  62. openlit/instrumentation/litellm/__init__.py +9 -10
  63. openlit/instrumentation/litellm/async_litellm.py +321 -250
  64. openlit/instrumentation/litellm/litellm.py +319 -248
  65. openlit/instrumentation/llamaindex/__init__.py +2 -2
  66. openlit/instrumentation/llamaindex/llamaindex.py +5 -5
  67. openlit/instrumentation/mem0/__init__.py +2 -2
  68. openlit/instrumentation/mem0/mem0.py +5 -5
  69. openlit/instrumentation/milvus/__init__.py +2 -2
  70. openlit/instrumentation/milvus/milvus.py +7 -7
  71. openlit/instrumentation/mistral/__init__.py +13 -13
  72. openlit/instrumentation/mistral/async_mistral.py +426 -253
  73. openlit/instrumentation/mistral/mistral.py +424 -250
  74. openlit/instrumentation/multion/__init__.py +7 -7
  75. openlit/instrumentation/multion/async_multion.py +9 -7
  76. openlit/instrumentation/multion/multion.py +9 -7
  77. openlit/instrumentation/ollama/__init__.py +19 -39
  78. openlit/instrumentation/ollama/async_ollama.py +137 -563
  79. openlit/instrumentation/ollama/ollama.py +136 -563
  80. openlit/instrumentation/ollama/utils.py +333 -0
  81. openlit/instrumentation/openai/__init__.py +11 -11
  82. openlit/instrumentation/openai/async_openai.py +25 -27
  83. openlit/instrumentation/openai/openai.py +25 -27
  84. openlit/instrumentation/phidata/__init__.py +2 -2
  85. openlit/instrumentation/phidata/phidata.py +6 -4
  86. openlit/instrumentation/pinecone/__init__.py +6 -6
  87. openlit/instrumentation/pinecone/pinecone.py +7 -7
  88. openlit/instrumentation/premai/__init__.py +5 -5
  89. openlit/instrumentation/premai/premai.py +268 -219
  90. openlit/instrumentation/qdrant/__init__.py +2 -2
  91. openlit/instrumentation/qdrant/async_qdrant.py +7 -7
  92. openlit/instrumentation/qdrant/qdrant.py +7 -7
  93. openlit/instrumentation/reka/__init__.py +5 -5
  94. openlit/instrumentation/reka/async_reka.py +93 -55
  95. openlit/instrumentation/reka/reka.py +93 -55
  96. openlit/instrumentation/together/__init__.py +9 -9
  97. openlit/instrumentation/together/async_together.py +284 -242
  98. openlit/instrumentation/together/together.py +284 -242
  99. openlit/instrumentation/transformers/__init__.py +3 -3
  100. openlit/instrumentation/transformers/transformers.py +79 -48
  101. openlit/instrumentation/vertexai/__init__.py +19 -69
  102. openlit/instrumentation/vertexai/async_vertexai.py +333 -990
  103. openlit/instrumentation/vertexai/vertexai.py +333 -990
  104. openlit/instrumentation/vllm/__init__.py +3 -3
  105. openlit/instrumentation/vllm/vllm.py +65 -35
  106. openlit/otel/events.py +85 -0
  107. openlit/otel/tracing.py +3 -13
  108. openlit/semcov/__init__.py +16 -4
  109. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/METADATA +2 -2
  110. openlit-1.33.11.dist-info/RECORD +125 -0
  111. openlit-1.33.9.dist-info/RECORD +0 -121
  112. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/LICENSE +0 -0
  113. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/WHEEL +0 -0
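
Two renames recur throughout these files: the `trace_content` flag becomes `capture_message_content`, and the wrapper factories lose their `gen_ai_endpoint` label argument in favor of spans named `{operation} {model}`. The representative hunks below, from the transformers and vertexai instrumentations, show the pattern in full. As a sketch of what the flag rename means at the call site, assuming the kwarg is still surfaced through `openlit.init()` as in earlier releases:

    # Hypothetical call-site sketch of the renamed flag; parameter names other
    # than capture_message_content follow openlit.init()'s documented signature.
    import openlit

    # openlit <= 1.33.9:
    #   openlit.init(application_name="demo", environment="dev", trace_content=True)
    # openlit 1.33.11:
    openlit.init(application_name="demo", environment="dev", capture_message_content=True)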
openlit/instrumentation/transformers/__init__.py

@@ -21,15 +21,15 @@ class TransformersInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info")
-        trace_content = kwargs.get("trace_content")
+        capture_message_content = kwargs.get("capture_message_content")
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("transformers")

         wrap_function_wrapper(
             "transformers",
             "TextGenerationPipeline.__call__",
-            text_wrap("huggingface.text_generation", version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+            text_wrap(version, environment, application_name,
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     @staticmethod
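
`text_wrap` is a wrapper factory in the wrapt style: `wrap_function_wrapper` expects a callable taking `(wrapped, instance, args, kwargs)`, which is why the transformers hunks below can read `instance._forward_params` and call through with `wrapped(*args, **kwargs)`. A minimal sketch of that shape, with the body elided (the real one follows in the diff):

    # Schematic of the factory pattern used by openlit instrumentations;
    # only the signatures are taken from the diff, the body is illustrative.
    def text_wrap(version, environment, application_name,
                  tracer, pricing_info, capture_message_content, metrics, disable_metrics):
        def wrapper(wrapped, instance, args, kwargs):
            # wrapt passes the bound pipeline as `instance`, so the wrapper can
            # inspect instance.model.config.name_or_path around the real call.
            return wrapped(*args, **kwargs)
        return wrapper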
openlit/instrumentation/transformers/transformers.py

@@ -1,19 +1,25 @@
-# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument
 """
 Module for monitoring ChromaDB.
 """

 import logging
+import time
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import handle_exception, general_tokens
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from openlit.__helpers import (
+    get_chat_model_cost,
+    handle_exception,
+    general_tokens,
+    create_metrics_attributes,
+    set_server_address_and_port
+)
 from openlit.semcov import SemanticConvetion

 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)

-def text_wrap(gen_ai_endpoint, version, environment, application_name,
-              tracer, pricing_info, trace_content, metrics, disable_metrics):
+def text_wrap(version, environment, application_name,
+              tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Creates a wrapper around a function call to trace and log its execution metrics.

@@ -21,13 +27,12 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
     log its operation, and trace its execution using OpenTelemetry.

     Parameters:
-    - gen_ai_endpoint (str): A descriptor or name for the endpoint being traced.
     - version (str): The version of the Langchain application.
     - environment (str): The deployment environment (e.g., 'production', 'development').
     - application_name (str): Name of the Langchain application.
     - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
     - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
-    - trace_content (bool): Flag indicating whether to trace the content of the response.
+    - capture_message_content (bool): Flag indicating whether to trace the content of the response.

     Returns:
     - function: A higher-order function that takes a function 'wrapped' and returns
@@ -55,8 +60,15 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
         errors are handled and logged appropriately.
         """

-        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+        server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 80)
+        request_model = instance.model.config.name_or_path
+
+        span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
+            start_time = time.time()
             response = wrapped(*args, **kwargs)
+            end_time = time.time()

             # pylint: disable=protected-access
             forward_params = instance._forward_params
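
The span name now encodes the operation and the model instead of a fixed endpoint string. Assuming `GEN_AI_OPERATION_TYPE_CHAT` is the string "chat" (as in the OTel GenAI semantic conventions), a pipeline backed by gpt2 yields:

    # Illustrative span naming under the new scheme (constant value assumed):
    request_model = "gpt2"                 # instance.model.config.name_or_path
    span_name = f"chat {request_model}"    # "chat gpt2", was "huggingface.text_generation"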
@@ -67,45 +79,56 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
             else:
                 prompt = kwargs.get("args", "")

-            prompt_tokens = general_tokens(prompt[0])
+            input_tokens = general_tokens(prompt[0])

             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-            span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
-                               gen_ai_endpoint)
-            span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                               SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE)
-            span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
-                               environment)
-            span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
-                               application_name)
             span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                                SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+            span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                               SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE)
             span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                               instance.model.config.name_or_path)
+                               request_model)
             span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
                                forward_params.get("temperature", "null"))
             span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
                                forward_params.get("top_p", "null"))
             span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
                                forward_params.get("max_length", -1))
-            if trace_content:
+            span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
+                               input_tokens)
+            span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
+                               server_address)
+            span.set_attribute(SemanticConvetion.SERVER_PORT,
+                               server_port)
+            span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
+                               request_model)
+
+            span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                               environment)
+            span.set_attribute(SERVICE_NAME,
+                               application_name)
+            span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
+                               False)
+            span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
+                               end_time - start_time)
+            span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
+                               version)
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
                         SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                     },
                 )
-            span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                               prompt_tokens)

             i = 0
-            completion_tokens = 0
+            output_tokens = 0
             for completion in response:
                 if len(response) > 1:
                     attribute_name = f"gen_ai.content.completion.{i}"
                 else:
                     attribute_name = SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT
-                if trace_content:
+                if capture_message_content:
                     # pylint: disable=bare-except
                     try:
                         llm_response = completion.get('generated_text', '')
@@ -115,43 +138,51 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.add_event(
                         name=attribute_name,
                         attributes={
-                            # pylint: disable=line-too-long
                             SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llm_response,
                         },
                     )
-                completion_tokens += general_tokens(llm_response)
+                output_tokens += general_tokens(llm_response)

                 i=i+1
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                               completion_tokens)
+                               output_tokens)
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                               prompt_tokens + completion_tokens)
+                               input_tokens + output_tokens)
+
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(request_model,
+                                       pricing_info, input_tokens,
+                                       output_tokens)
+            span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                               cost)
+
             span.set_status(Status(StatusCode.OK))

             if disable_metrics is False:
-                attributes = {
-                    TELEMETRY_SDK_NAME:
-                        "openlit",
-                    SemanticConvetion.GEN_AI_APPLICATION_NAME:
-                        application_name,
-                    SemanticConvetion.GEN_AI_SYSTEM:
-                        SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE,
-                    SemanticConvetion.GEN_AI_ENVIRONMENT:
-                        environment,
-                    SemanticConvetion.GEN_AI_OPERATION:
-                        SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                    SemanticConvetion.GEN_AI_REQUEST_MODEL:
-                        instance.model.config.name_or_path
-                }
+                attributes = create_metrics_attributes(
+                    service_name=application_name,
+                    deployment_environment=environment,
+                    operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+                    system=SemanticConvetion.GEN_AI_SYSTEM_HUGGING_FACE,
+                    request_model=request_model,
+                    server_address=server_address,
+                    server_port=server_port,
+                    response_model=request_model,
+                )

+                metrics["genai_client_usage_tokens"].record(
+                    input_tokens + output_tokens, attributes
+                )
+                metrics["genai_client_operation_duration"].record(
+                    end_time - start_time, attributes
+                )
+                metrics["genai_server_ttft"].record(
+                    end_time - start_time, attributes
+                )
                 metrics["genai_requests"].add(1, attributes)
-                metrics["genai_total_tokens"].add(
-                    prompt_tokens +
-                    completion_tokens, attributes)
-                metrics["genai_completion_tokens"].add(
-                    completion_tokens, attributes)
-                metrics["genai_prompt_tokens"].add(
-                    prompt_tokens, attributes)
+                metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                metrics["genai_cost"].record(cost, attributes)

             # Return original response
             return response
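
The new cost attribute multiplies token counts by per-model prices from `pricing_info`. `get_chat_model_cost` lives in `openlit/__helpers.py`, which this diff extends but does not show; a plausible per-1k-token computation, stated purely as an assumption, is:

    # Hedged sketch of the cost computation; the real get_chat_model_cost in
    # openlit/__helpers.py may differ in its lookup and error handling.
    def get_chat_model_cost_sketch(model, pricing_info, input_tokens, output_tokens):
        rates = pricing_info.get("chat", {}).get(model, {"promptPrice": 0, "completionPrice": 0})
        return ((input_tokens / 1000) * rates["promptPrice"]
                + (output_tokens / 1000) * rates["completionPrice"])

    # e.g. 120 input and 80 output tokens at $0.5/$1.5 per 1k tokens:
    # 0.12 * 0.5 + 0.08 * 1.5 = 0.18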
openlit/instrumentation/vertexai/__init__.py

@@ -7,16 +7,10 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper

 from openlit.instrumentation.vertexai.vertexai import (
-    generate_content, predict, predict_streaming,
-    send_message, start_chat, start_chat_streaming,
-    embeddings
+    send_message
 )
 from openlit.instrumentation.vertexai.async_vertexai import (
-    generate_content_async, predict_async,
-    predict_streaming_async,
-    send_message_async,
-    start_chat_async, start_chat_streaming_async,
-    embeddings_async
+    async_send_message
 )


@@ -36,7 +30,7 @@ class VertexAIInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-        trace_content = kwargs.get("trace_content", False)
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("google-cloud-aiplatform")

@@ -44,102 +38,58 @@ class VertexAIInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "vertexai.generative_models",
             "GenerativeModel.generate_content",
-            generate_content("vertexai.generate_content", version, environment, application_name,
-                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+            send_message(version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.generative_models",
             "ChatSession.send_message",
-            send_message("vertexai.send_message", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextGenerationModel.predict",
-            predict("vertexai.predict", version, environment, application_name,
-                    tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextGenerationModel.predict_streaming",
-            predict_streaming("vertexai.predict", version, environment, application_name,
-                              tracer, pricing_info, trace_content, metrics, disable_metrics),
+            send_message(version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message",
-            start_chat("vertexai.send_message", version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+            send_message(version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_streaming",
-            start_chat_streaming("vertexai.send_message", version, environment, application_name,
-                                 tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextEmbeddingModel.get_embeddings",
-            embeddings("vertexai.get_embeddings", version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+            send_message(version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         #async
         wrap_function_wrapper(
             "vertexai.generative_models",
             "GenerativeModel.generate_content_async",
-            generate_content_async("vertexai.generate_content", version, environment,
-                                   application_name, tracer, pricing_info, trace_content,
-                                   metrics, disable_metrics),
+            async_send_message(version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.generative_models",
             "ChatSession.send_message_async",
-            send_message_async("vertexai.send_message", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextGenerationModel.predict_async",
-            predict_async("vertexai.predict", version, environment, application_name,
-                          tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextGenerationModel.predict_streaming_async",
-            predict_streaming_async("vertexai.predict", version, environment, application_name,
-                                    tracer, pricing_info, trace_content, metrics, disable_metrics),
+            async_send_message(version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_async",
-            start_chat_async("vertexai.send_message", version, environment, application_name,
-                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+            async_send_message(version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_streaming_async",
-            start_chat_streaming_async("vertexai.send_message", version, environment,
-                                       application_name, tracer, pricing_info, trace_content,
-                                       metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "vertexai.language_models",
-            "TextEmbeddingModel.get_embeddings_async",
-            embeddings_async("vertexai.get_embeddings", version, environment, application_name,
-                             tracer, pricing_info, trace_content, metrics, disable_metrics),
+            async_send_message(version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):
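
All nine VertexAI entry points now funnel into two wrapper factories, `send_message` and `async_send_message`, rather than seven endpoint-specific ones. Since `BaseInstrumentor.instrument()` forwards its keyword arguments to `_instrument`, driving the instrumentor directly (which `openlit.init()` normally does for you) would look roughly like this; the tracer wiring below is an assumption, not openlit's own setup:

    # Hedged usage sketch: enabling the VertexAI instrumentor by hand.
    from opentelemetry import trace
    from openlit.instrumentation.vertexai import VertexAIInstrumentor

    VertexAIInstrumentor().instrument(
        tracer=trace.get_tracer(__name__),
        environment="dev",
        application_name="demo",
        pricing_info={},
        capture_message_content=True,
        disable_metrics=True,  # metrics_dict wiring omitted in this sketch
    )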