openlit 1.22.2__tar.gz → 1.22.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. {openlit-1.22.2 → openlit-1.22.5}/PKG-INFO +1 -1
  2. {openlit-1.22.2 → openlit-1.22.5}/pyproject.toml +1 -1
  3. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/__init__.py +9 -1
  4. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/langchain/langchain.py +12 -11
  5. {openlit-1.22.2 → openlit-1.22.5}/LICENSE +0 -0
  6. {openlit-1.22.2 → openlit-1.22.5}/README.md +0 -0
  7. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/__helpers.py +0 -0
  8. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  9. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  10. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  11. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  12. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
  13. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
  14. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  15. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  16. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  17. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  18. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  19. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  20. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  21. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  22. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  23. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  24. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  25. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  26. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
  27. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
  28. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  29. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  30. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  31. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/groq/__init__.py +0 -0
  32. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  33. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/groq/groq.py +0 -0
  34. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  35. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  36. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  37. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  38. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  39. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  40. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  41. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  42. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  43. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  44. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  45. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  46. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  47. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/openai/__init__.py +0 -0
  48. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
  49. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  50. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
  51. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/openai/openai.py +0 -0
  52. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  53. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  54. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  55. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  56. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  57. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  58. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  59. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  60. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  61. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  62. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  63. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/otel/metrics.py +0 -0
  64. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/otel/tracing.py +0 -0
  65. {openlit-1.22.2 → openlit-1.22.5}/src/openlit/semcov/__init__.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: openlit
3
- Version: 1.22.2
3
+ Version: 1.22.5
4
4
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
5
5
  Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
6
6
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "openlit"
3
- version = "1.22.2"
3
+ version = "1.22.5"
4
4
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
5
5
  authors = ["OpenLIT"]
6
6
  repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
@@ -130,6 +130,13 @@ class OpenlitConfig:
130
130
  cls.trace_content = trace_content
131
131
  cls.disable_metrics = disable_metrics
132
132
 
133
+ def module_exists(module_name):
134
+ """Check if nested modules exist, addressing the dot notation issue."""
135
+ parts = module_name.split(".")
136
+ for i in range(1, len(parts) + 1):
137
+ if find_spec(".".join(parts[:i])) is None:
138
+ return False
139
+ return True
133
140
 
134
141
  def instrument_if_available(
135
142
  instrumentor_name,
@@ -150,7 +157,7 @@ def instrument_if_available(
150
157
  return
151
158
 
152
159
  try:
153
- if find_spec(module_name) is not None:
160
+ if module_exists(module_name):
154
161
  instrumentor_instance.instrument(
155
162
  environment=config.environment,
156
163
  application_name=config.application_name,
@@ -167,6 +174,7 @@ def instrument_if_available(
167
174
  except Exception as e:
168
175
  logger.error("Failed to instrument %s: %s", instrumentor_name, e)
169
176
 
177
+
170
178
  def init(environment="default", application_name="default", tracer=None, otlp_endpoint=None,
171
179
  otlp_headers=None, disable_batch=False, trace_content=True, disabled_instrumentors=None,
172
180
  meter=None, disable_metrics=False, pricing_json=None, collect_gpu_stats=False):
@@ -484,9 +484,12 @@ def chat(gen_ai_endpoint, version, environment, application_name,
484
484
  input_tokens = response.response_metadata.get("prompt_eval_count", 0)
485
485
  output_tokens = response.response_metadata.get("eval_count", 0)
486
486
 
487
+ prompt = "" if isinstance(args[0], list) else args[0]
488
+ model = getattr(instance, 'model_name', getattr(instance, 'model', 'gpt-4'))
489
+
487
490
  # Calculate cost of the operation
488
491
  cost = get_chat_model_cost(
489
- str(getattr(instance, 'model')),
492
+ model,
490
493
  pricing_info, input_tokens, output_tokens
491
494
  )
492
495
 
@@ -502,15 +505,13 @@ def chat(gen_ai_endpoint, version, environment, application_name,
502
505
  span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
503
506
  application_name)
504
507
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
505
- str(getattr(instance, 'model')))
508
+ model)
506
509
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
507
- str(getattr(instance, 'temperature')))
510
+ str(getattr(instance, 'temperature', 1)))
508
511
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
509
- str(getattr(instance, 'top_k')))
512
+ str(getattr(instance, 'top_k', 1)))
510
513
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
511
- str(getattr(instance, 'top_p')))
512
- span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
513
- [response.response_metadata["done_reason"]])
514
+ str(getattr(instance, 'top_p', 1)))
514
515
  span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
515
516
  False)
516
517
  span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
@@ -525,7 +526,7 @@ def chat(gen_ai_endpoint, version, environment, application_name,
525
526
  span.add_event(
526
527
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
527
528
  attributes={
528
- SemanticConvetion.GEN_AI_CONTENT_PROMPT: args[0],
529
+ SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
529
530
  },
530
531
  )
531
532
  span.add_event(
@@ -550,7 +551,7 @@ def chat(gen_ai_endpoint, version, environment, application_name,
550
551
  SemanticConvetion.GEN_AI_TYPE:
551
552
  SemanticConvetion.GEN_AI_TYPE_CHAT,
552
553
  SemanticConvetion.GEN_AI_REQUEST_MODEL:
553
- str(getattr(instance, 'model'))
554
+ model
554
555
  }
555
556
 
556
557
  metrics["genai_requests"].add(1, attributes)
@@ -619,8 +620,8 @@ def achat(gen_ai_endpoint, version, environment, application_name,
619
620
  response = await wrapped(*args, **kwargs)
620
621
 
621
622
  try:
622
- input_tokens = response.response_metadata["prompt_eval_count"] or 0
623
- output_tokens = response.response_metadata["eval_count"] or 0
623
+ input_tokens = response.response_metadata.get("prompt_eval_count", 0)
624
+ output_tokens = response.response_metadata.get("eval_count", 0)
624
625
 
625
626
  # Calculate cost of the operation
626
627
  cost = get_chat_model_cost(
File without changes
File without changes