openlit 1.32.7__py3-none-any.whl → 1.32.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/langchain/__init__.py +6 -0
- openlit/instrumentation/langchain/langchain.py +22 -8
- {openlit-1.32.7.dist-info → openlit-1.32.9.dist-info}/METADATA +1 -1
- {openlit-1.32.7.dist-info → openlit-1.32.9.dist-info}/RECORD +6 -6
- {openlit-1.32.7.dist-info → openlit-1.32.9.dist-info}/LICENSE +0 -0
- {openlit-1.32.7.dist-info → openlit-1.32.9.dist-info}/WHEEL +0 -0
@@ -71,6 +71,12 @@ WRAPPED_METHODS = [
|
|
71
71
|
"endpoint": "langchain.chat_models",
|
72
72
|
"wrapper": achat,
|
73
73
|
},
|
74
|
+
{
|
75
|
+
"package": "langchain.chains.base",
|
76
|
+
"object": "Chain.invoke",
|
77
|
+
"endpoint": "langchain.chain.invoke",
|
78
|
+
"wrapper": chat,
|
79
|
+
}
|
74
80
|
]
|
75
81
|
|
76
82
|
class LangChainInstrumentor(BaseInstrumentor):
|
@@ -1,4 +1,4 @@
|
|
1
|
-
# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, unused-import
|
1
|
+
# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, unused-import, too-many-function-args
|
2
2
|
"""
|
3
3
|
Module for monitoring Langchain applications.
|
4
4
|
"""
|
@@ -507,12 +507,25 @@ def chat(gen_ai_endpoint, version, environment, application_name,
|
|
507
507
|
response = wrapped(*args, **kwargs)
|
508
508
|
|
509
509
|
try:
|
510
|
-
|
511
|
-
|
512
|
-
|
513
|
-
|
514
|
-
|
515
|
-
|
510
|
+
prompt = ""
|
511
|
+
if hasattr(response, 'response_metadata') and response.response_metadata:
|
512
|
+
token_usage = response.response_metadata.get("token_usage", {})
|
513
|
+
input_tokens = token_usage.get("prompt_tokens", 0)
|
514
|
+
output_tokens = token_usage.get("completion_tokens", 0)
|
515
|
+
model = response.response_metadata.get("model_name", "gpt-4")
|
516
|
+
prompt = "" if isinstance(args[0], list) else args[0]
|
517
|
+
else:
|
518
|
+
if not isinstance(response, dict) or "output_text" not in response:
|
519
|
+
return response
|
520
|
+
# Fallback: Calculate tokens manually if response_metadata is missing
|
521
|
+
model = "gpt-4o-mini" # Fallback model
|
522
|
+
input_texts = [
|
523
|
+
doc.page_content for doc in response.get("input_documents", [])
|
524
|
+
if isinstance(doc.page_content, str)
|
525
|
+
]
|
526
|
+
input_tokens = sum(general_tokens(text) for text in input_texts)
|
527
|
+
output_text = response.get("output_text", "")
|
528
|
+
output_tokens = general_tokens(output_text)
|
516
529
|
|
517
530
|
# Calculate cost of the operation
|
518
531
|
cost = get_chat_model_cost(
|
@@ -556,10 +569,11 @@ def chat(gen_ai_endpoint, version, environment, application_name,
|
|
556
569
|
SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
|
557
570
|
},
|
558
571
|
)
|
572
|
+
completion_content = getattr(response, 'content', "")
|
559
573
|
span.add_event(
|
560
574
|
name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
|
561
575
|
attributes={
|
562
|
-
SemanticConvetion.GEN_AI_CONTENT_COMPLETION:
|
576
|
+
SemanticConvetion.GEN_AI_CONTENT_COMPLETION: completion_content,
|
563
577
|
},
|
564
578
|
)
|
565
579
|
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: openlit
|
3
|
-
Version: 1.32.7
|
3
|
+
Version: 1.32.9
|
4
4
|
Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
|
5
5
|
Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
|
6
6
|
Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
|
@@ -59,8 +59,8 @@ openlit/instrumentation/haystack/haystack.py,sha256=oQIZiDhdp3gnJnhYQ1OouJMc9YT0
|
|
59
59
|
openlit/instrumentation/julep/__init__.py,sha256=oonEVK41P5g4SYRm0E_H4zCVH9NM4aJ-UswXzF3Oiv0,3136
|
60
60
|
openlit/instrumentation/julep/async_julep.py,sha256=OO8lIm9uUV1lhPo_klKBVyaDwgHhFJlohTeZItd5qwU,5273
|
61
61
|
openlit/instrumentation/julep/julep.py,sha256=lDUmkEP4hXk7vgUUbNRD-mnfdfrZifXSFVVILs8Ttkw,5276
|
62
|
-
openlit/instrumentation/langchain/__init__.py,sha256=
|
63
|
-
openlit/instrumentation/langchain/langchain.py,sha256=
|
62
|
+
openlit/instrumentation/langchain/__init__.py,sha256=gVtPZJifx-H8rqdZlU3GXdy3NtRF8yVb7PW7gE-ddJk,3592
|
63
|
+
openlit/instrumentation/langchain/langchain.py,sha256=XzZ3AH5Ep-UwMlIWVohXaGcZmuDYfUDcc4yeL4HTXvk,37860
|
64
64
|
openlit/instrumentation/litellm/__init__.py,sha256=Z-LsVHKJdPganHfJA_rWg7xAfQYkvLfpLdF-eckU4qY,2401
|
65
65
|
openlit/instrumentation/litellm/async_litellm.py,sha256=1MKNZbvKaf1lFWbXi1MQy3qFNNeXawav34SDlOQ_H3w,27544
|
66
66
|
openlit/instrumentation/litellm/litellm.py,sha256=4YqCQ4CEQ4sfDu7pTlnflL_AfUqYEQdJDTO7nHJ6noY,27450
|
@@ -106,7 +106,7 @@ openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOs
|
|
106
106
|
openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
|
107
107
|
openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
|
108
108
|
openlit/semcov/__init__.py,sha256=9gCyLKqhV2iErzNMIb4H9-CAlzaMUUoNXJmRZbDdhfc,10628
|
109
|
-
openlit-1.32.7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
110
|
-
openlit-1.32.
|
111
|
-
openlit-1.32.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
112
|
-
openlit-1.32.7.dist-info/RECORD,,
|
109
|
+
openlit-1.32.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
110
|
+
openlit-1.32.9.dist-info/METADATA,sha256=pA744cjYuts0_b3GUybEuT8CuTgxdozLEYYfMayLS3Y,22670
|
111
|
+
openlit-1.32.9.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
112
|
+
openlit-1.32.9.dist-info/RECORD,,
|
File without changes
|
File without changes
|