openlit 1.32.7__py3-none-any.whl → 1.32.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openlit/instrumentation/langchain/__init__.py

@@ -71,6 +71,12 @@ WRAPPED_METHODS = [
         "endpoint": "langchain.chat_models",
         "wrapper": achat,
     },
+    {
+        "package": "langchain.chains.base",
+        "object": "Chain.invoke",
+        "endpoint": "langchain.chain.invoke",
+        "wrapper": chat,
+    }
 ]

 class LangChainInstrumentor(BaseInstrumentor):
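The added entry routes `Chain.invoke` through the existing synchronous `chat` wrapper, so generic chain invocations (not just chat-model calls) now emit spans. A minimal sketch of how an entry like this is typically applied with wrapt follows; the `apply_wrapped_methods` helper is illustrative, and the parameters after `application_name` are assumed from the signature fragment visible in the hunk headers below, not confirmed by this diff:

    # Sketch of applying a WRAPPED_METHODS entry; openlit's actual
    # _instrument body may differ.
    from wrapt import wrap_function_wrapper

    def apply_wrapped_methods(wrapped_methods, version, environment,
                              application_name, tracer, pricing_info,
                              trace_content, metrics, disable_metrics):
        for method in wrapped_methods:
            wrap_function_wrapper(
                method["package"],   # e.g. "langchain.chains.base"
                method["object"],    # e.g. "Chain.invoke"
                # e.g. chat(...) returns a (wrapped, instance, args, kwargs)
                # closure that wrapt installs over the target method
                method["wrapper"](method["endpoint"], version, environment,
                                  application_name, tracer, pricing_info,
                                  trace_content, metrics, disable_metrics),
            )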
openlit/instrumentation/langchain/langchain.py

@@ -1,4 +1,4 @@
-# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, unused-import
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, unused-import, too-many-function-args
 """
 Module for monitoring Langchain applications.
 """
@@ -507,12 +507,25 @@ def chat(gen_ai_endpoint, version, environment, application_name,
         response = wrapped(*args, **kwargs)

         try:
-            token_usage = response.response_metadata.get("token_usage", {})
-            input_tokens = token_usage.get("prompt_tokens", 0)
-            output_tokens = token_usage.get("completion_tokens", 0)
-            model = response.response_metadata.get("model_name", "gpt-4")
-
-            prompt = "" if isinstance(args[0], list) else args[0]
+            prompt = ""
+            if hasattr(response, 'response_metadata') and response.response_metadata:
+                token_usage = response.response_metadata.get("token_usage", {})
+                input_tokens = token_usage.get("prompt_tokens", 0)
+                output_tokens = token_usage.get("completion_tokens", 0)
+                model = response.response_metadata.get("model_name", "gpt-4")
+                prompt = "" if isinstance(args[0], list) else args[0]
+            else:
+                if not isinstance(response, dict) or "output_text" not in response:
+                    return response
+                # Fallback: Calculate tokens manually if response_metadata is missing
+                model = "gpt-4o-mini"  # Fallback model
+                input_texts = [
+                    doc.page_content for doc in response.get("input_documents", [])
+                    if isinstance(doc.page_content, str)
+                ]
+                input_tokens = sum(general_tokens(text, model) for text in input_texts)
+                output_text = response.get("output_text", "")
+                output_tokens = general_tokens(output_text, model)

             # Calculate cost of the operation
             cost = get_chat_model_cost(
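The rewritten block keeps the old metadata path for `AIMessage`-style responses and adds a fallback for plain-dict chain outputs (e.g. `load_qa_chain` results carrying `input_documents` and `output_text`), where token counts are estimated with `general_tokens` rather than read from `response_metadata`; the newly disabled `too-many-function-args` check plausibly exists to permit the two-argument `general_tokens(text, model)` calls. A hedged stand-in for that helper, assuming a tiktoken-based count (openlit's real implementation and signature may differ):

    import tiktoken

    def general_tokens(text, model="gpt-4o-mini"):
        # Assumed stand-in: use the model's tiktoken encoding when known,
        # otherwise fall back to a generic encoding.
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")
        return len(encoding.encode(text))

    # The fallback branch above then reduces to arithmetic like:
    response = {"input_documents": [], "output_text": "Paris is the capital."}
    output_tokens = general_tokens(response["output_text"])  # small integer count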
@@ -556,10 +569,11 @@ def chat(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                     },
                 )
+                completion_content = getattr(response, 'content', "")
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                     attributes={
-                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.content,
+                        SemanticConvetion.GEN_AI_CONTENT_COMPLETION: completion_content,
                     },
                 )

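Reading the completion via `getattr` with a default is what lets one code path serve both response shapes: a chat-model result carries `.content`, while the dict returned by a chain does not and would previously have raised `AttributeError`. A short illustration (the `AIMessage` class here is a minimal stand-in for langchain's):

    class AIMessage:  # minimal stand-in for langchain_core's AIMessage
        def __init__(self, content):
            self.content = content

    print(getattr(AIMessage("hello"), 'content', ""))     # "hello"
    print(getattr({"output_text": "hi"}, 'content', ""))  # "" (no crash)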
openlit-1.32.7.dist-info/METADATA → openlit-1.32.8.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.32.7
+Version: 1.32.8
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
openlit-1.32.7.dist-info/RECORD → openlit-1.32.8.dist-info/RECORD

@@ -59,8 +59,8 @@ openlit/instrumentation/haystack/haystack.py,sha256=oQIZiDhdp3gnJnhYQ1OouJMc9YT0
 openlit/instrumentation/julep/__init__.py,sha256=oonEVK41P5g4SYRm0E_H4zCVH9NM4aJ-UswXzF3Oiv0,3136
 openlit/instrumentation/julep/async_julep.py,sha256=OO8lIm9uUV1lhPo_klKBVyaDwgHhFJlohTeZItd5qwU,5273
 openlit/instrumentation/julep/julep.py,sha256=lDUmkEP4hXk7vgUUbNRD-mnfdfrZifXSFVVILs8Ttkw,5276
-openlit/instrumentation/langchain/__init__.py,sha256=0AI2Dnqw81IcJw3jM--gGkv_HRh2GtosOGJjvOpw7Zk,3431
-openlit/instrumentation/langchain/langchain.py,sha256=jZgWBBWYHYSNnkf5wKyNFF_z9M9YxaZKGI_uyfvtMBU,36909
+openlit/instrumentation/langchain/__init__.py,sha256=gVtPZJifx-H8rqdZlU3GXdy3NtRF8yVb7PW7gE-ddJk,3592
+openlit/instrumentation/langchain/langchain.py,sha256=fYtFDN95aW4DLUKIaldBQXbee4oR__yo1cWcZXqQkEk,37874
 openlit/instrumentation/litellm/__init__.py,sha256=Z-LsVHKJdPganHfJA_rWg7xAfQYkvLfpLdF-eckU4qY,2401
 openlit/instrumentation/litellm/async_litellm.py,sha256=1MKNZbvKaf1lFWbXi1MQy3qFNNeXawav34SDlOQ_H3w,27544
 openlit/instrumentation/litellm/litellm.py,sha256=4YqCQ4CEQ4sfDu7pTlnflL_AfUqYEQdJDTO7nHJ6noY,27450
@@ -106,7 +106,7 @@ openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOs
 openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
 openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
 openlit/semcov/__init__.py,sha256=9gCyLKqhV2iErzNMIb4H9-CAlzaMUUoNXJmRZbDdhfc,10628
-openlit-1.32.7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-openlit-1.32.7.dist-info/METADATA,sha256=7asxUq7rdEPsvzQ375lE42Wi6XQOYMF17fPrIymLHNk,22670
-openlit-1.32.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-openlit-1.32.7.dist-info/RECORD,,
+openlit-1.32.8.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.32.8.dist-info/METADATA,sha256=w203C6GIfvP0D7jXDdE0BK3K7GU7SbnyYIKtVN_umLM,22670
+openlit-1.32.8.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+openlit-1.32.8.dist-info/RECORD,,
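For readers verifying the RECORD changes above: each entry is `path,sha256=<digest>,<size>`, where the digest is unpadded urlsafe base64 of the file's SHA-256, per the wheel spec (PEP 376/427). A small checker sketch, with paths relative to an unpacked wheel:

    import base64
    import hashlib
    import os

    def record_entry(path):
        # Recompute the "sha256=<digest>,<size>" portion of a RECORD line
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
        return f"sha256={b64},{os.path.getsize(path)}"

    # e.g. record_entry("openlit/instrumentation/langchain/langchain.py")
    # should print "sha256=fYtFDN95aW4DLUKIaldBQXbee4oR__yo1cWcZXqQkEk,37874"
    # for the unpacked 1.32.8 wheel.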