openlit 1.33.20.tar.gz → 1.33.22.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. {openlit-1.33.20 → openlit-1.33.22}/PKG-INFO +1 -1
  2. {openlit-1.33.20 → openlit-1.33.22}/pyproject.toml +1 -1
  3. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/__helpers.py +57 -0
  4. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/google_ai_studio/__init__.py +21 -6
  5. openlit-1.33.22/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +159 -0
  6. openlit-1.33.22/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +159 -0
  7. openlit-1.33.22/src/openlit/instrumentation/google_ai_studio/utils.py +245 -0
  8. openlit-1.33.22/src/openlit/instrumentation/ollama/__init__.py +97 -0
  9. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ollama/async_ollama.py +4 -2
  10. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ollama/ollama.py +4 -2
  11. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ollama/utils.py +8 -4
  12. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/transformers/__init__.py +11 -7
  13. openlit-1.33.22/src/openlit/instrumentation/transformers/transformers.py +60 -0
  14. openlit-1.33.22/src/openlit/instrumentation/transformers/utils.py +183 -0
  15. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/otel/metrics.py +5 -0
  16. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/semcov/__init__.py +2 -0
  17. openlit-1.33.20/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -227
  18. openlit-1.33.20/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -227
  19. openlit-1.33.20/src/openlit/instrumentation/ollama/__init__.py +0 -84
  20. openlit-1.33.20/src/openlit/instrumentation/transformers/transformers.py +0 -197
  21. {openlit-1.33.20 → openlit-1.33.22}/LICENSE +0 -0
  22. {openlit-1.33.20 → openlit-1.33.22}/README.md +0 -0
  23. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/__init__.py +0 -0
  24. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/__init__.py +0 -0
  25. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/all.py +0 -0
  26. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/bias_detection.py +0 -0
  27. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/hallucination.py +0 -0
  28. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/toxicity.py +0 -0
  29. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/evals/utils.py +0 -0
  30. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/__init__.py +0 -0
  31. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/all.py +0 -0
  32. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/prompt_injection.py +0 -0
  33. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/restrict_topic.py +0 -0
  34. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/sensitive_topic.py +0 -0
  35. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/guard/utils.py +0 -0
  36. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ag2/__init__.py +0 -0
  37. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ag2/ag2.py +0 -0
  38. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ai21/__init__.py +0 -0
  39. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ai21/ai21.py +0 -0
  40. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
  41. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/ai21/utils.py +0 -0
  42. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  43. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  44. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  45. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/anthropic/utils.py +0 -0
  46. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
  47. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
  48. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/astra/__init__.py +0 -0
  49. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/astra/astra.py +0 -0
  50. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/astra/async_astra.py +0 -0
  51. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/astra/utils.py +0 -0
  52. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  53. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
  54. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
  55. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
  56. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  57. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  58. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/bedrock/utils.py +0 -0
  59. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  60. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  61. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  62. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
  63. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  64. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
  65. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
  66. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
  67. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
  68. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
  69. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  70. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/crewai/crewai.py +0 -0
  71. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
  72. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
  73. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  74. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  75. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  76. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  77. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  78. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
  79. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
  80. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  81. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  82. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  83. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/groq/__init__.py +0 -0
  84. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  85. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/groq/groq.py +0 -0
  86. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  87. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  88. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/julep/__init__.py +0 -0
  89. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/julep/async_julep.py +0 -0
  90. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/julep/julep.py +0 -0
  91. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  92. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
  93. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  94. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/letta/__init__.py +0 -0
  95. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/letta/letta.py +0 -0
  96. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  97. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
  98. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/litellm/litellm.py +0 -0
  99. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  100. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  101. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/mem0/__init__.py +0 -0
  102. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/mem0/mem0.py +0 -0
  103. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  104. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  105. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  106. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  107. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  108. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/multion/__init__.py +0 -0
  109. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/multion/async_multion.py +0 -0
  110. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/multion/multion.py +0 -0
  111. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/openai/__init__.py +0 -0
  112. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  113. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/openai/openai.py +0 -0
  114. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
  115. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
  116. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/phidata/__init__.py +0 -0
  117. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/phidata/phidata.py +0 -0
  118. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  119. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  120. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/premai/__init__.py +0 -0
  121. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/premai/premai.py +0 -0
  122. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  123. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
  124. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  125. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/reka/__init__.py +0 -0
  126. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/reka/async_reka.py +0 -0
  127. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/reka/reka.py +0 -0
  128. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/together/__init__.py +0 -0
  129. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/together/async_together.py +0 -0
  130. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/together/together.py +0 -0
  131. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  132. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  133. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  134. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  135. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  136. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/otel/events.py +0 -0
  137. {openlit-1.33.20 → openlit-1.33.22}/src/openlit/otel/tracing.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: openlit
- Version: 1.33.20
+ Version: 1.33.22
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
  License: Apache-2.0
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "openlit"
- version = "1.33.20"
+ version = "1.33.22"
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
  authors = ["OpenLIT"]
  license = "Apache-2.0"
@@ -240,6 +240,11 @@ def extract_and_format_input(messages):
      fixed_roles = ['user', 'assistant', 'system', 'tool', 'developer']
      formatted_messages = {role_key: {'role': '', 'content': ''} for role_key in fixed_roles}
  
+     # Check if input is a simple string
+     if isinstance(messages, str):
+         formatted_messages['user'] = {'role': 'user', 'content': messages}
+         return formatted_messages
+
      for message in messages:
          message = response_as_dict(message)
  
@@ -276,3 +281,55 @@ def concatenate_all_contents(formatted_messages):
          for message_data in formatted_messages.values()
          if message_data['content']
      )
+
+ def format_and_concatenate(messages):
+     """
+     Process a list of messages to extract content, categorize them by role,
+     and concatenate all 'content' fields into a single string with role: content format.
+     """
+
+     formatted_messages = {}
+
+     # Check if input is a simple string
+     if isinstance(messages, str):
+         formatted_messages['user'] = {'role': 'user', 'content': messages}
+     elif isinstance(messages, list) and all(isinstance(m, str) for m in messages):
+         # If it's a list of strings, each string is 'user' input
+         user_content = ' '.join(messages)
+         formatted_messages['user'] = {'role': 'user', 'content': user_content}
+     else:
+         for message in messages:
+             message = response_as_dict(message)
+             role = message.get('role', 'unknown')  # Default to 'unknown' if no role is specified
+             content = message.get('content', '')
+
+             # Initialize role in formatted messages if not present
+             if role not in formatted_messages:
+                 formatted_messages[role] = {'role': role, 'content': ''}
+
+             # Handle list of dictionaries in content
+             if isinstance(content, list):
+                 content_str = []
+                 for item in content:
+                     if isinstance(item, dict):
+                         # Collect text or other attributes as needed
+                         text = item.get('text', '')
+                         image_url = item.get('image_url', '')
+                         content_str.append(text)
+                         content_str.append(image_url)
+                 content_str = ", ".join(filter(None, content_str))
+             else:
+                 content_str = content
+
+             # Concatenate content
+             if formatted_messages[role]['content']:
+                 formatted_messages[role]['content'] += ' ' + content_str
+             else:
+                 formatted_messages[role]['content'] = content_str
+
+     # Concatenate role and content for all messages
+     return ' '.join(
+         f"{message_data['role']}: {message_data['content']}"
+         for message_data in formatted_messages.values()
+         if message_data['content']
+     )
@@ -7,11 +7,11 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from wrapt import wrap_function_wrapper
  
  from openlit.instrumentation.google_ai_studio.google_ai_studio import (
-     generate
+     generate, generate_stream
  )
  
  from openlit.instrumentation.google_ai_studio.async_google_ai_studio import (
-     async_generate
+     async_generate, async_generate_stream
  )
  
  _instruments = ("google-genai >= 1.3.0",)
@@ -39,16 +39,31 @@ class GoogleAIStudioInstrumentor(BaseInstrumentor):
              "google.genai.models",
              "Models.generate_content",
              generate(version, environment, application_name,
-                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # sync stream generate
+         wrap_function_wrapper(
+             "google.genai.models",
+             "Models.generate_content_stream",
+             generate_stream(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
          )
  
          # async generate
          wrap_function_wrapper(
              "google.genai.models",
              "AsyncModels.generate_content",
-             async_generate(version, environment,
-                 application_name, tracer, pricing_info, capture_message_content, metrics,
-                 disable_metrics),
+             async_generate(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # async stream generate
+         wrap_function_wrapper(
+             "google.genai.models",
+             "AsyncModels.generate_content_stream",
+             async_generate_stream(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
          )
  
      def _uninstrument(self, **kwargs):
@@ -0,0 +1,159 @@
+ """
+ Module for monitoring Google AI Studio API calls.
+ """
+
+ import logging
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port
+ )
+ from openlit.instrumentation.google_ai_studio.utils import (
+     process_chat_response,
+     process_chunk,
+     process_streaming_chat_response
+ )
+ from openlit.semcov import SemanticConvention
+
+ # Initialize logger for logging potential issues and operations
+ logger = logging.getLogger(__name__)
+
+ def async_generate(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI function call
+     """
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "generativelanguage.googleapis.com", 443)
+         request_model = kwargs.get("model", "gemini-2.0-flash")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+
+             response = process_chat_response(
+                 instance = instance,
+                 response=response,
+                 request_model=request_model,
+                 pricing_info=pricing_info,
+                 server_port=server_port,
+                 server_address=server_address,
+                 environment=environment,
+                 application_name=application_name,
+                 metrics=metrics,
+                 start_time=start_time,
+                 span=span,
+                 args=args,
+                 kwargs=kwargs,
+                 capture_message_content=capture_message_content,
+                 disable_metrics=disable_metrics,
+                 version=version,
+             )
+
+             return response
+
+     return wrapper
+
+ def async_generate_stream(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI function call
+     """
+
+     class TracedAsyncStream:
+         """
+         Wrapper for streaming responses to collect telemetry.
+         """
+
+         def __init__(
+                 self,
+                 wrapped,
+                 span,
+                 span_name,
+                 kwargs,
+                 server_address,
+                 server_port,
+                 **args,
+             ):
+             self.__wrapped__ = wrapped
+             self._span = span
+             self._span_name = span_name
+             self._llmresponse = ''
+             self._finish_reason = ''
+             self._output_tokens = ''
+             self._input_tokens = ''
+             self._response_model = ''
+             self._tools = None
+
+             self._args = args
+             self._kwargs = kwargs
+             self._start_time = time.time()
+             self._end_time = None
+             self._timestamps = []
+             self._ttft = 0
+             self._tbt = 0
+             self._server_address = server_address
+             self._server_port = server_port
+
+         async def __aenter__(self):
+             await self.__wrapped__.__aenter__()
+             return self
+
+         async def __aexit__(self, exc_type, exc_value, traceback):
+             await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
+
+         def __aiter__(self):
+             return self
+
+         async def __getattr__(self, name):
+             """Delegate attribute access to the wrapped object."""
+             return getattr(await self.__wrapped__, name)
+
+         async def __anext__(self):
+             try:
+                 chunk = await self.__wrapped__.__anext__()
+                 process_chunk(self, chunk)
+                 return chunk
+             except StopAsyncIteration:
+                 try:
+                     with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version
+                         )
+
+                 except Exception as e:
+                     handle_exception(self._span, e)
+                     logger.error("Error in trace creation: %s", e)
+                 raise
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "generativelanguage.googleapis.com", 443)
+         request_model = kwargs.get("model", "gemini-2.0-flash")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         awaited_wrapped = await wrapped(*args, **kwargs)
+         span = tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT)
+
+         return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+     return wrapper
@@ -0,0 +1,159 @@
+ """
+ Module for monitoring Google AI Studio API calls.
+ """
+
+ import logging
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port
+ )
+ from openlit.instrumentation.google_ai_studio.utils import (
+     process_chat_response,
+     process_chunk,
+     process_streaming_chat_response
+ )
+ from openlit.semcov import SemanticConvention
+
+ # Initialize logger for logging potential issues and operations
+ logger = logging.getLogger(__name__)
+
+ def generate(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI function call
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "generativelanguage.googleapis.com", 443)
+         request_model = kwargs.get("model", "gemini-2.0-flash")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+
+             response = process_chat_response(
+                 instance = instance,
+                 response=response,
+                 request_model=request_model,
+                 pricing_info=pricing_info,
+                 server_port=server_port,
+                 server_address=server_address,
+                 environment=environment,
+                 application_name=application_name,
+                 metrics=metrics,
+                 start_time=start_time,
+                 span=span,
+                 args=args,
+                 kwargs=kwargs,
+                 capture_message_content=capture_message_content,
+                 disable_metrics=disable_metrics,
+                 version=version,
+             )
+
+             return response
+
+     return wrapper
+
+ def generate_stream(version, environment, application_name,
+     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for GenAI function call
+     """
+
+     class TracedSyncStream:
+         """
+         Wrapper for streaming responses to collect telemetry.
+         """
+
+         def __init__(
+                 self,
+                 wrapped,
+                 span,
+                 span_name,
+                 kwargs,
+                 server_address,
+                 server_port,
+                 **args,
+             ):
+             self.__wrapped__ = wrapped
+             self._span = span
+             self._span_name = span_name
+             self._llmresponse = ''
+             self._finish_reason = ''
+             self._output_tokens = ''
+             self._input_tokens = ''
+             self._response_model = ''
+             self._tools = None
+
+             self._args = args
+             self._kwargs = kwargs
+             self._start_time = time.time()
+             self._end_time = None
+             self._timestamps = []
+             self._ttft = 0
+             self._tbt = 0
+             self._server_address = server_address
+             self._server_port = server_port
+
+         def __enter__(self):
+             self.__wrapped__.__enter__()
+             return self
+
+         def __exit__(self, exc_type, exc_value, traceback):
+             self.__wrapped__.__exit__(exc_type, exc_value, traceback)
+
+         def __iter__(self):
+             return self
+
+         def __getattr__(self, name):
+             """Delegate attribute access to the wrapped object."""
+             return getattr(self.__wrapped__, name)
+
+         def __next__(self):
+             try:
+                 chunk = self.__wrapped__.__next__()
+                 process_chunk(self, chunk)
+                 return chunk
+             except StopIteration:
+                 try:
+                     with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version
+                         )
+
+                 except Exception as e:
+                     handle_exception(self._span, e)
+                     logger.error("Error in trace creation: %s", e)
+                 raise
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the GenAI function call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "generativelanguage.googleapis.com", 443)
+         request_model = kwargs.get("model", "gemini-2.0-flash")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+         awaited_wrapped = wrapped(*args, **kwargs)
+         span = tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT)
+
+         return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+     return wrapper