openlit 1.20.0__tar.gz → 1.21.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {openlit-1.20.0 → openlit-1.21.0}/PKG-INFO +1 -1
  2. {openlit-1.20.0 → openlit-1.21.0}/pyproject.toml +1 -1
  3. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +11 -7
  4. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +11 -7
  5. {openlit-1.20.0 → openlit-1.21.0}/LICENSE +0 -0
  6. {openlit-1.20.0 → openlit-1.21.0}/README.md +0 -0
  7. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/__helpers.py +0 -0
  8. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/__init__.py +0 -0
  9. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  10. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  11. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  12. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  13. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  14. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  15. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  16. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  17. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  18. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  19. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  20. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  21. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  22. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  23. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  24. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  25. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  26. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  27. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/groq/__init__.py +0 -0
  28. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  29. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/groq/groq.py +0 -0
  30. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  31. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  32. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  33. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  34. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  35. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  36. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  37. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  38. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  39. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  40. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  41. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  42. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  43. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  44. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/openai/__init__.py +0 -0
  45. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
  46. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  47. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
  48. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/openai/openai.py +0 -0
  49. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  50. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  51. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  52. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  53. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  54. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  55. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  56. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  57. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  58. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  59. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  60. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/otel/metrics.py +0 -0
  61. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/otel/tracing.py +0 -0
  62. {openlit-1.20.0 → openlit-1.21.0}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.20.0 → openlit-1.21.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.20.0
+Version: 1.21.0
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.20.0 → openlit-1.21.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.20.0"
+version = "1.21.0"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
{openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py
@@ -1,6 +1,6 @@
 # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment, protected-access
 """
-Module for monitoring Ollama API calls.
+Module for monitoring Google AI Studio API calls.
 """
 
 import logging
@@ -24,9 +24,9 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
         gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Ollama API.
+        application_name: Name of the application using the Google AI Studio API.
         tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Ollama usage.
+        pricing_info: Information used for calculating the cost of Google AI Studio usage.
         trace_content: Flag indicating whether to trace the actual content.
 
     Returns:
@@ -81,10 +81,12 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
             model = instance._model_id
             if hasattr(instance, "_model_name"):
                 model = instance._model_name.replace("publishers/google/models/", "")
+            if model.startswith("models/"):
+                model = model[len("models/"):]
 
             total_tokens = input_tokens + output_tokens
             # Calculate cost of the operation
-            cost = get_chat_model_cost(kwargs.get("model", "gpt-3.5-turbo"),
+            cost = get_chat_model_cost(model,
                                        pricing_info, input_tokens,
                                        output_tokens)
 
@@ -174,6 +176,8 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
             model = instance._model_id
             if hasattr(instance, "_model_name"):
                 model = instance._model_name.replace("publishers/google/models/", "")
+            if model.startswith("models/"):
+                model = model[len("models/"):]
 
             # Set base span attribues
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -210,7 +214,7 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
             completion_tokens = response.usage_metadata.candidates_token_count
             total_tokens = response.usage_metadata.total_token_count
             # Calculate cost of the operation
-            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+            cost = get_chat_model_cost(model,
                                        pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
@@ -231,13 +235,13 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
                 SemanticConvetion.GEN_AI_APPLICATION_NAME:
                     application_name,
                 SemanticConvetion.GEN_AI_SYSTEM:
-                    SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
+                    SemanticConvetion.GEN_AI_SYSTEM_GOOGLE_AI_STUDIO,
                 SemanticConvetion.GEN_AI_ENVIRONMENT:
                     environment,
                 SemanticConvetion.GEN_AI_TYPE:
                     SemanticConvetion.GEN_AI_TYPE_CHAT,
                 SemanticConvetion.GEN_AI_REQUEST_MODEL:
-                    kwargs.get("model", "llama3")
+                    model
             }
 
             metrics["genai_requests"].add(1, attributes)
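
The lines added in the two hunks above normalize Google's resource-style model names before they reach the pricing lookup. A minimal standalone sketch of that logic (the helper name is illustrative, not part of openlit):

def normalize_model_name(model: str) -> str:
    # Strip the Vertex-style publisher prefix first, then the
    # "models/" prefix used in Google AI Studio resource names,
    # leaving a bare id such as "gemini-1.5-flash".
    model = model.replace("publishers/google/models/", "")
    if model.startswith("models/"):
        model = model[len("models/"):]
    return model

assert normalize_model_name("models/gemini-1.5-flash") == "gemini-1.5-flash"
assert normalize_model_name("publishers/google/models/gemini-1.5-pro") == "gemini-1.5-pro"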
{openlit-1.20.0 → openlit-1.21.0}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py
@@ -1,6 +1,6 @@
 # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment, protected-access
 """
-Module for monitoring Ollama API calls.
+Module for monitoring Google AI Studio API calls.
 """
 
 import logging
@@ -24,9 +24,9 @@ def generate(gen_ai_endpoint, version, environment, application_name,
         gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Ollama API.
+        application_name: Name of the application using the Google AI Studio API.
         tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Ollama usage.
+        pricing_info: Information used for calculating the cost of Google AI Studio usage.
         trace_content: Flag indicating whether to trace the actual content.
 
     Returns:
@@ -81,10 +81,12 @@ def generate(gen_ai_endpoint, version, environment, application_name,
             model = instance._model_id
             if hasattr(instance, "_model_name"):
                 model = instance._model_name.replace("publishers/google/models/", "")
+            if model.startswith("models/"):
+                model = model[len("models/"):]
 
             total_tokens = input_tokens + output_tokens
             # Calculate cost of the operation
-            cost = get_chat_model_cost(kwargs.get("model", "gpt-3.5-turbo"),
+            cost = get_chat_model_cost(model,
                                        pricing_info, input_tokens,
                                        output_tokens)
 
@@ -176,6 +178,8 @@ def generate(gen_ai_endpoint, version, environment, application_name,
             model = instance._model_id
             if hasattr(instance, "_model_name"):
                 model = instance._model_name.replace("publishers/google/models/", "")
+            if model.startswith("models/"):
+                model = model[len("models/"):]
 
             # Set base span attribues
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -212,7 +216,7 @@ def generate(gen_ai_endpoint, version, environment, application_name,
             completion_tokens = response.usage_metadata.candidates_token_count
             total_tokens = response.usage_metadata.total_token_count
             # Calculate cost of the operation
-            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+            cost = get_chat_model_cost(model,
                                        pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
@@ -233,13 +237,13 @@ def generate(gen_ai_endpoint, version, environment, application_name,
                 SemanticConvetion.GEN_AI_APPLICATION_NAME:
                     application_name,
                 SemanticConvetion.GEN_AI_SYSTEM:
-                    SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
+                    SemanticConvetion.GEN_AI_SYSTEM_GOOGLE_AI_STUDIO,
                 SemanticConvetion.GEN_AI_ENVIRONMENT:
                     environment,
                 SemanticConvetion.GEN_AI_TYPE:
                     SemanticConvetion.GEN_AI_TYPE_CHAT,
                 SemanticConvetion.GEN_AI_REQUEST_MODEL:
-                    kwargs.get("model", "llama3")
+                    model
             }
 
             metrics["genai_requests"].add(1, attributes)
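
Both files also switch the cost calculation and the GEN_AI_REQUEST_MODEL attribute from a kwargs lookup to the model resolved from the client instance. A hedged illustration of why that matters (the empty kwargs here are hypothetical but typical, since Google AI Studio clients carry the model on the client object rather than in per-call keyword arguments):

kwargs = {}  # model not passed per call, so the old lookup always hit its fallback

# 1.20.0: priced an unrelated default model
old_model = kwargs.get("model", "gpt-3.5-turbo")              # -> "gpt-3.5-turbo" (wrong pricing)

# 1.21.0: prices the id resolved from instance._model_id / _model_name
new_model = normalize_model_name("models/gemini-1.5-flash")   # -> "gemini-1.5-flash"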