openlit 1.18.1.tar.gz → 1.18.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {openlit-1.18.1 → openlit-1.18.2}/PKG-INFO +3 -3
  2. {openlit-1.18.1 → openlit-1.18.2}/pyproject.toml +3 -3
  3. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/mistral/__init__.py +18 -18
  4. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/mistral/async_mistral.py +13 -13
  5. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/mistral/mistral.py +12 -12
  6. {openlit-1.18.1 → openlit-1.18.2}/LICENSE +0 -0
  7. {openlit-1.18.1 → openlit-1.18.2}/README.md +0 -0
  8. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/__helpers.py +0 -0
  9. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/__init__.py +0 -0
  10. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  11. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  12. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  13. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  14. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  15. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  16. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  17. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  18. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  19. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  20. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  21. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  22. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  23. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  24. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  25. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  26. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  27. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/groq/__init__.py +0 -0
  28. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  29. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/groq/groq.py +0 -0
  30. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  31. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  32. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  33. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  34. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  35. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  36. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  37. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  38. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  39. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  40. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  41. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/openai/__init__.py +0 -0
  42. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/openai/async_azure_openai.py +0 -0
  43. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  44. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/openai/azure_openai.py +0 -0
  45. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/openai/openai.py +0 -0
  46. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  47. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  48. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  49. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  50. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  51. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  52. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  53. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  54. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  55. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  56. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  57. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/otel/metrics.py +0 -0
  58. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/otel/tracing.py +0 -0
  59. {openlit-1.18.1 → openlit-1.18.2}/src/openlit/semcov/__init__.py +0 -0
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.18.1
-Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
+Version: 1.18.2
+Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
-Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
+Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
 Author: OpenLIT
 Requires-Python: >=3.7.1,<4.0.0
 Classifier: Programming Language :: Python :: 3
@@ -1,12 +1,12 @@
 [tool.poetry]
 name = "openlit"
-version = "1.18.1"
-description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects"
+version = "1.18.2"
+description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 repository = "https://github.com/openlit/openlit/tree/main/openlit/python"
 readme = "README.md"
 homepage = "https://github.com/openlit/openlit/tree/main/openlit/python"
-keywords = ["OpenTelemetry", "otel", "otlp","llm", "tracing", "openai", "anthropic", "claude", "cohere", "llm monitoring", "observability", "monitoring", "gpt", "Generative AI", "chatGPT"]
+keywords = ["OpenTelemetry", "otel", "otlp","llm", "tracing", "openai", "anthropic", "claude", "cohere", "llm monitoring", "observability", "monitoring", "gpt", "Generative AI", "chatGPT", "gpu"]
 
 [tool.poetry.dependencies]
 python = "^3.7.1"
@@ -9,10 +9,10 @@ from openlit.instrumentation.mistral.mistral import chat, chat_stream, embedding
 from openlit.instrumentation.mistral.async_mistral import async_chat, async_chat_stream
 from openlit.instrumentation.mistral.async_mistral import async_embeddings
 
-_instruments = ("mistralai >= 0.1.0",)
+_instruments = ("mistralai >= 1.0.0",)
 
 class MistralInstrumentor(BaseInstrumentor):
-    """An instrumentor for Azure Mistral's client library."""
+    """An instrumentor for Mistral's client library."""
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
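
The `_instruments` bump above is what actually gates the new wrap targets: the OpenTelemetry `BaseInstrumentor` machinery consults `instrumentation_dependencies()` and skips patching when the installed `mistralai` does not satisfy the specifier. A minimal sketch of that version check, using only `importlib.metadata` and `packaging` (the helper name `dependencies_satisfied` is hypothetical, not OpenLIT's code):

# Sketch of the dependency gate implied by _instruments (hypothetical helper,
# not OpenLIT code): instrumentation is skipped unless mistralai >= 1.0.0.
import importlib.metadata

from packaging.requirements import Requirement

_instruments = ("mistralai >= 1.0.0",)

def dependencies_satisfied() -> bool:
    for spec in _instruments:
        req = Requirement(spec)
        try:
            installed = importlib.metadata.version(req.name)
        except importlib.metadata.PackageNotFoundError:
            return False  # mistralai not installed: nothing to patch
        if not req.specifier.contains(installed, prereleases=True):
            return False  # e.g. a 0.x install no longer qualifies
    return True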
@@ -27,50 +27,50 @@ class MistralInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("mistralai")
 
-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.client",
-            "MistralClient.chat",
+            "mistralai.chat",
+            "Chat.complete",
             chat("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.client",
-            "MistralClient.chat_stream",
+            "mistralai.chat",
+            "Chat.stream",
             chat_stream("mistral.chat", version, environment, application_name,
                         tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.client",
-            "MistralClient.embeddings",
+            "mistralai.embeddings",
+            "Embeddings.create",
             embeddings("mistral.embeddings", version, environment, application_name,
                        tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
         # Async
         wrap_function_wrapper(
-            "mistralai.async_client",
-            "MistralAsyncClient.chat",
+            "mistralai.chat",
+            "Chat.complete_async",
             async_chat("mistral.chat", version, environment, application_name,
                        tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
-        #sync
+        # Async
         wrap_function_wrapper(
-            "mistralai.async_client",
-            "MistralAsyncClient.chat_stream",
+            "mistralai.chat",
+            "Chat.stream_async",
             async_chat_stream("mistral.chat", version, environment, application_name,
                               tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
         #sync
         wrap_function_wrapper(
-            "mistralai.async_client",
-            "MistralAsyncClient.embeddings",
+            "mistralai.embeddings",
+            "Embeddings.create_async",
             async_embeddings("mistral.embeddings", version, environment, application_name,
                              tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
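
The retargeting above tracks the mistralai 1.x layout: the old `MistralClient`/`MistralAsyncClient` pair is replaced by a single `Mistral` client whose chat and embeddings operations live on `mistralai.chat.Chat` and `mistralai.embeddings.Embeddings`. A minimal sketch of the calls these wrappers now intercept (assumes `mistralai >= 1.0.0`, a `MISTRAL_API_KEY` environment variable, and an illustrative model name):

# Sketch of the 1.x client surface the new wrap targets cover.
# Assumes mistralai >= 1.0.0 and MISTRAL_API_KEY in the environment.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Intercepted by "mistralai.chat" / "Chat.complete"
response = client.chat.complete(
    model="mistral-small-latest",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello"}],
)

# Intercepted by "mistralai.chat" / "Chat.stream"
for event in client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Say hello"}],
):
    print(event.data.choices[0].delta.content, end="")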
@@ -56,8 +56,8 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message.role
-                content = message.content
+                role = message["role"]
+                content = message["content"]
 
                 if isinstance(content, list):
                     content_str = ", ".join(
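
The switch from `message.role` to `message["role"]` follows from the 1.x SDK accepting messages as plain dicts rather than `ChatMessage` objects; the same fix recurs in the exception path below and in the sync wrappers in `mistral.py`. A small illustration of the shapes being normalized (the list-valued content is an assumed multimodal form):

# Messages as the 1.x SDK receives them: plain dicts, so key lookup is
# required; attribute access (.role) would raise AttributeError.
messages = [
    {"role": "user", "content": "Describe the weather"},
    {"role": "user", "content": ["part one", "part two"]},  # assumed list form
]

formatted_messages = []
for message in messages:
    role = message["role"]
    content = message["content"]
    if isinstance(content, list):
        content = ", ".join(str(item) for item in content)
    formatted_messages.append(f"{role}: {content}")

print("\n".join(formatted_messages))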
@@ -207,14 +207,14 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
         llmresponse = ""
 
         # Loop through streaming events capturing relevant details
-        async for event in wrapped(*args, **kwargs):
-            response_id = event.id
-            llmresponse += event.choices[0].delta.content
-            if event.usage is not None:
-                prompt_tokens = event.usage.prompt_tokens
-                completion_tokens = event.usage.completion_tokens
-                total_tokens = event.usage.total_tokens
-                finish_reason = event.choices[0].finish_reason
+        async for event in await wrapped(*args, **kwargs):
+            response_id = event.data.id
+            llmresponse += event.data.choices[0].delta.content
+            if event.data.usage is not None:
+                prompt_tokens = event.data.usage.prompt_tokens
+                completion_tokens = event.data.usage.completion_tokens
+                total_tokens = event.data.usage.total_tokens
+                finish_reason = event.data.choices[0].finish_reason
             yield event
 
         # Handling exception ensure observability without disrupting operation
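
Two 1.x behaviors drive the stream changes above: `Chat.stream_async` is a coroutine that must be awaited before iteration, and each streamed event nests the completion chunk under `.data`. A minimal sketch of consuming such a stream (same assumptions as the earlier client sketch):

# Consuming a 1.x async stream: await the call, then iterate; chunk fields
# live under event.data. Assumes mistralai >= 1.0.0 and MISTRAL_API_KEY.
import asyncio
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

async def stream_demo() -> str:
    text = ""
    stream = await client.chat.stream_async(
        model="mistral-small-latest",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello"}],
    )
    async for event in stream:
        text += event.data.choices[0].delta.content or ""
        if event.data.usage is not None:  # usage arrives on the final event
            print("total tokens:", event.data.usage.total_tokens)
    return text

print(asyncio.run(stream_demo()))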
@@ -223,8 +223,8 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message.role
-                content = message.content
+                role = message["role"]
+                content = message["content"]
 
                 if isinstance(content, list):
                     content_str = ", ".join(
@@ -364,7 +364,7 @@ def async_embeddings(gen_ai_endpoint, version, environment, application_name,
 
         try:
             # Get prompt from kwargs and store as a single string
-            prompt = ', '.join(kwargs.get('input', []))
+            prompt = ', '.join(kwargs.get('inputs', []))
 
             # Calculate cost of the operation
             cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
@@ -55,8 +55,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message.role
-                content = message.content
+                role = message["role"]
+                content = message["content"]
 
                 if isinstance(content, list):
                     content_str = ", ".join(
@@ -207,13 +207,13 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
 
         # Loop through streaming events capturing relevant details
         for event in wrapped(*args, **kwargs):
-            response_id = event.id
-            llmresponse += event.choices[0].delta.content
-            if event.usage is not None:
-                prompt_tokens = event.usage.prompt_tokens
-                completion_tokens = event.usage.completion_tokens
-                total_tokens = event.usage.total_tokens
-                finish_reason = event.choices[0].finish_reason
+            response_id = event.data.id
+            llmresponse += event.data.choices[0].delta.content
+            if event.data.usage is not None:
+                prompt_tokens = event.data.usage.prompt_tokens
+                completion_tokens = event.data.usage.completion_tokens
+                total_tokens = event.data.usage.total_tokens
+                finish_reason = event.data.choices[0].finish_reason
             yield event
 
         # Handling exception ensure observability without disrupting operation
@@ -222,8 +222,8 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message.role
-                content = message.content
+                role = message["role"]
+                content = message["content"]
 
                 if isinstance(content, list):
                     content_str = ", ".join(
@@ -363,7 +363,7 @@ def embeddings(gen_ai_endpoint, version, environment, application_name,
 
         try:
             # Get prompt from kwargs and store as a single string
-            prompt = ', '.join(kwargs.get('input', []))
+            prompt = ', '.join(kwargs.get('inputs', []))
 
             # Calculate cost of the operation
             cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
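
Both the sync and async embeddings wrappers now read the `inputs` keyword, matching the renamed parameter on `Embeddings.create` in the 1.x SDK (the old client took `input`). A minimal sketch of the call being traced, reusing the code's own `mistral-embed` default:

# The 1.x embeddings call the wrapper inspects: the keyword is `inputs`,
# not `input`. Assumes mistralai >= 1.0.0 and MISTRAL_API_KEY.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

response = client.embeddings.create(
    model="mistral-embed",
    inputs=["first text to embed", "second text to embed"],
)
print(len(response.data), "vectors of dim", len(response.data[0].embedding))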