openlit 1.34.22__py3-none-any.whl → 1.34.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,175 @@
+"""
+AG2 OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    get_chat_model_cost,
+    common_span_attributes,
+    record_completion_metrics,
+)
+from openlit.semcov import SemanticConvention
+
+def calculate_tokens_and_cost(response, request_model, pricing_info):
+    """
+    Calculate the input, output tokens, and their respective costs from AG2 response.
+    """
+    input_tokens = 0
+    output_tokens = 0
+
+    # Early return if response doesn't have cost data
+    if not hasattr(response, "cost") or response.cost is None:
+        cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+        return input_tokens, output_tokens, cost
+
+    try:
+        input_tokens, output_tokens = _extract_tokens_from_cost(response.cost)
+    except (AttributeError, TypeError):
+        # If there's any issue accessing cost data, default to 0 tokens
+        input_tokens = 0
+        output_tokens = 0
+
+    cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+    return input_tokens, output_tokens, cost
+
+def _extract_tokens_from_cost(cost_data):
+    """
+    Extract input and output tokens from AG2 cost data structure.
+    """
+    input_tokens = 0
+    output_tokens = 0
+
+    for usage_data in cost_data.values():
+        if not isinstance(usage_data, dict):
+            continue
+
+        for model_data in usage_data.values():
+            if isinstance(model_data, dict):
+                input_tokens += model_data.get("prompt_tokens", 0)
+                output_tokens += model_data.get("completion_tokens", 0)
+
+    return input_tokens, output_tokens
+
+def format_content(chat_history):
+    """
+    Format the chat history into a string for span events.
+    """
+    if not chat_history:
+        return ""
+
+    formatted_messages = []
+    for chat in chat_history:
+        role = chat.get("role", "user")
+        content = chat.get("content", "")
+        formatted_messages.append(f"{role}: {content}")
+
+    return "\n".join(formatted_messages)
+
+def common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version, operation_type):
+    """
+    Process agent request and generate Telemetry
+    """
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
+        scope._server_address, scope._server_port, scope._request_model, scope._response_model,
+        environment, application_name, False, 0, scope._end_time - scope._start_time, version)
+
+    # Span Attributes for Agent-specific parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, scope._agent_name)
+
+    # Span Attributes for Response parameters
+    if hasattr(scope, "_input_tokens"):
+        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, scope._cost)
+
+    # Span Attributes for Content
+    if capture_message_content and hasattr(scope, "_chat_history"):
+        chat_content = format_content(scope._chat_history)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, chat_content)
+
+        # To be removed once the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: chat_content,
+            },
+        )
+
+    # Set agent description for create agent operation
+    if hasattr(scope, "_system_message"):
+        scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, scope._system_message)
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics and hasattr(scope, "_input_tokens"):
+        record_completion_metrics(metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
+            scope._server_address, scope._server_port, scope._request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+            scope._cost, 0, scope._end_time - scope._start_time)
+
+def process_agent_creation(agent_name, llm_config, system_message, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process agent creation and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._agent_name = agent_name
+    scope._request_model = llm_config.get("model", "gpt-4o")
+    scope._response_model = scope._request_model
+    scope._system_message = system_message
+    scope._server_address, scope._server_port = server_address, server_port
+
+    common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT)
+
+def process_agent_run(response, agent_name, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process agent run and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._agent_name = agent_name
+    scope._request_model = request_model
+    scope._chat_history = getattr(response, "chat_history", [])
+    scope._server_address, scope._server_port = server_address, server_port
+
+    # Calculate tokens and cost
+    scope._input_tokens, scope._output_tokens, scope._cost = calculate_tokens_and_cost(
+        response, request_model, pricing_info)
+
+    # Extract response model from cost data
+    try:
+        if hasattr(response, "cost") and response.cost is not None:
+            cost_data = response.cost.get("usage_including_cached_inference", {})
+            scope._response_model = list(cost_data.keys())[1] if len(cost_data) > 1 else request_model
+        else:
+            scope._response_model = request_model
+    except (AttributeError, IndexError, KeyError, TypeError):
+        scope._response_model = request_model
+
+    common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK)
+
+    return response
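
That is the whole of the new AG2 utility module (presumably openlit/instrumentation/ag2/utils.py, going by openlit's layout). The nested payload that _extract_tokens_from_cost walks, and the list(cost_data.keys())[1] lookup in process_agent_run, are easier to follow against a concrete example. Below is a minimal sketch with a made-up cost dict: the key names mirror what the code above reads, the figures are invented, and it assumes (as the [1] index implies) that "total_cost" precedes the per-model entry in AG2's payload.

    from openlit.instrumentation.ag2.utils import _extract_tokens_from_cost

    # Hypothetical AG2 cost payload. "total_cost" is a scalar, so the
    # isinstance(model_data, dict) check skips it; only the per-model
    # dicts contribute tokens.
    sample_cost = {
        "usage_including_cached_inference": {
            "total_cost": 0.00042,
            "gpt-4o-mini": {
                "cost": 0.00042,
                "prompt_tokens": 120,
                "completion_tokens": 48,
            },
        },
    }

    assert _extract_tokens_from_cost(sample_cost) == (120, 48)

    # process_agent_run resolves the response model the same way: key [0]
    # is taken to be "total_cost", so key [1] lands on the model name,
    # with request_model as the fallback on any surprise.
    usage = sample_cost["usage_including_cached_inference"]
    assert list(usage.keys())[1] == "gpt-4o-mini"
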
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of LangChain Functions"""
 from typing import Collection
 import importlib.metadata
@@ -6,41 +5,17 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
 from openlit.instrumentation.langchain.langchain import (
-    general_wrap,
     hub,
     chat
 )
 from openlit.instrumentation.langchain.async_langchain import (
+    async_hub,
     async_chat
 )
 
 _instruments = ("langchain >= 0.1.20",)
 
 WRAPPED_METHODS = [
-    {
-        "package": "langchain_community.document_loaders.base",
-        "object": "BaseLoader.load",
-        "endpoint": "langchain.retrieve.load",
-        "wrapper": general_wrap,
-    },
-    {
-        "package": "langchain_community.document_loaders.base",
-        "object": "BaseLoader.aload",
-        "endpoint": "langchain.retrieve.load",
-        "wrapper": general_wrap,
-    },
-    {
-        "package": "langchain_text_splitters.base",
-        "object": "TextSplitter.split_documents",
-        "endpoint": "langchain.retrieve.split_documents",
-        "wrapper": general_wrap,
-    },
-    {
-        "package": "langchain_text_splitters.base",
-        "object": "TextSplitter.create_documents",
-        "endpoint": "langchain.retrieve.create_documents",
-        "wrapper": general_wrap,
-    },
     {
         "package": "langchain.hub",
         "object": "pull",
@@ -79,27 +54,29 @@ WRAPPED_METHODS = [
     },
     {
         "package": "langchain.chains.base",
-        "object": "Chain.invoke",
+        "object": "Chain.ainvoke",
         "endpoint": "langchain.chain.invoke",
         "wrapper": async_chat,
     }
 ]
 
 class LangChainInstrumentor(BaseInstrumentor):
-    """An instrumentor for Cohere's client library."""
+    """
+    An instrumentor for LangChain client library.
+    """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        application_name = kwargs.get("application_name")
-        environment = kwargs.get("environment")
+        version = importlib.metadata.version("langchain")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
         tracer = kwargs.get("tracer")
-        pricing_info = kwargs.get("pricing_info")
-        capture_message_content = kwargs.get("capture_message_content")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
         metrics = kwargs.get("metrics_dict")
         disable_metrics = kwargs.get("disable_metrics")
-        version = importlib.metadata.version("langchain")
 
         for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
@@ -110,9 +87,8 @@ class LangChainInstrumentor(BaseInstrumentor):
                 wrap_package,
                 wrap_object,
                 wrapper(gen_ai_endpoint, version, environment, application_name,
-                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+                    tracer, pricing_info, capture_message_content, metrics, disable_metrics),
             )
 
-    @staticmethod
     def _uninstrument(self, **kwargs):
         pass
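
Before and after this change, the LangChain instrumentor is table-driven: _instrument loops over WRAPPED_METHODS and lets wrapt patch each dotted path in place. A self-contained sketch of that mechanism follows, with a toy wrapper factory and json.dumps standing in for the LangChain entry points; every name in it is illustrative, not part of openlit.

    from wrapt import wrap_function_wrapper

    def logging_wrapper(endpoint):
        # Toy stand-in for factories like hub/chat: wrapt invokes the inner
        # function with (wrapped, instance, args, kwargs).
        def wrapper(wrapped, instance, args, kwargs):
            print(f"{endpoint}: entering {wrapped.__name__}")
            return wrapped(*args, **kwargs)
        return wrapper

    # Same shape as WRAPPED_METHODS above, pointed at a stdlib function.
    METHODS = [
        {"package": "json", "object": "dumps", "endpoint": "demo.json.dumps"},
    ]

    for method in METHODS:
        wrap_function_wrapper(method["package"], method["object"],
                              logging_wrapper(method["endpoint"]))

    import json
    json.dumps({"ok": True})  # prints: demo.json.dumps: entering dumps
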