openlit 1.34.3__py3-none-any.whl → 1.34.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
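For orientation, the AI21Instrumentor touched below hooks into OpenTelemetry's standard BaseInstrumentor.instrument() entry point, which forwards its keyword arguments to _instrument(). Below is a minimal sketch of wiring it up by hand, using the keyword names the diff shows _instrument() reading; in normal use openlit.init() performs this step, and the argument values are illustrative assumptions:

from opentelemetry import trace

from openlit.instrumentation.ai21 import AI21Instrumentor

# Hand-wire AI21 instrumentation (openlit.init() normally does this for you).
# The keyword names mirror what _instrument() pulls out of kwargs in the diff.
AI21Instrumentor().instrument(
    application_name="my-app",          # service identifier on spans
    environment="production",           # deployment environment attribute
    tracer=trace.get_tracer(__name__),  # any configured OpenTelemetry tracer
    metrics_dict=None,                  # openlit metric handles; None is fine when metrics are off
    pricing_info={},                    # pricing table used for cost estimation
    capture_message_content=False,      # opt in to recording prompts/completions
    disable_metrics=True,
)

After this call, ChatCompletions.create, StudioConversationalRag.create, and their async counterparts are wrapped so each request produces a client span.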
--- openlit/instrumentation/ai21/__init__.py
+++ openlit/instrumentation/ai21/__init__.py
@@ -13,53 +13,52 @@ from openlit.instrumentation.ai21.async_ai21 import (
     async_chat, async_chat_rag
 )
 
-_instruments = ('ai21 >= 3.0.0',)
+_instruments = ("ai21 >= 3.0.0",)
 
 class AI21Instrumentor(BaseInstrumentor):
     """
-    An instrumentor for AI21's client library.
+    An instrumentor for AI21 client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        application_name = kwargs.get('application_name', 'default')
-        environment = kwargs.get('environment', 'default')
-        tracer = kwargs.get('tracer')
-        event_provider = kwargs.get('event_provider')
-        metrics = kwargs.get('metrics_dict')
-        pricing_info = kwargs.get('pricing_info', {})
-        capture_message_content = kwargs.get('capture_message_content', False)
-        disable_metrics = kwargs.get('disable_metrics')
-        version = importlib.metadata.version('ai21')
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("ai21")
 
         #sync
         wrap_function_wrapper(
-            'ai21.clients.studio.resources.chat.chat_completions',
-            'ChatCompletions.create',
+            "ai21.clients.studio.resources.chat.chat_completions",
+            "ChatCompletions.create",
             chat(version, environment, application_name,
-                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
-            'ai21.clients.studio.resources.studio_conversational_rag',
-            'StudioConversationalRag.create',
+            "ai21.clients.studio.resources.studio_conversational_rag",
+            "StudioConversationalRag.create",
             chat_rag(version, environment, application_name,
-                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         #Async
         wrap_function_wrapper(
-            'ai21.clients.studio.resources.chat.async_chat_completions',
-            'AsyncChatCompletions.create',
+            "ai21.clients.studio.resources.chat.async_chat_completions",
+            "AsyncChatCompletions.create",
             async_chat(version, environment, application_name,
-                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
-            'ai21.clients.studio.resources.studio_conversational_rag',
-            'AsyncStudioConversationalRag.create',
+            "ai21.clients.studio.resources.studio_conversational_rag",
+            "AsyncStudioConversationalRag.create",
             async_chat_rag(version, environment, application_name,
-                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
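The chat, chat_rag, async_chat, and async_chat_rag factories passed to wrap_function_wrapper above follow wrapt's wrapper-factory convention: the factory captures the telemetry configuration in a closure and returns a wrapper(wrapped, instance, args, kwargs) callable. A generic sketch of that shape under illustrative names (not openlit's actual implementation):

import time

from wrapt import wrap_function_wrapper

def make_timing_wrapper(label):
    # Factory: capture configuration once, return the wrapt-style wrapper.
    def wrapper(wrapped, instance, args, kwargs):
        start = time.time()
        try:
            return wrapped(*args, **kwargs)  # call through to the original method
        finally:
            print(f"{label} took {time.time() - start:.3f}s")
    return wrapper

# Same patching mechanism the diff uses for AI21's client methods.
wrap_function_wrapper("json", "dumps", make_timing_wrapper("json.dumps"))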
--- openlit/instrumentation/ai21/ai21.py
+++ openlit/instrumentation/ai21/ai21.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvention
 logger = logging.getLogger(__name__)
 
 def chat(version, environment, application_name,
-         tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+         tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -46,9 +46,9 @@ def chat(version, environment, application_name,
             self._span = span
             self._span_name = span_name
             # Placeholder for aggregating streaming response
-            self._llmresponse = ''
-            self._response_id = ''
-            self._finish_reason = ''
+            self._llmresponse = ""
+            self._response_id = ""
+            self._finish_reason = ""
             self._input_tokens = 0
             self._output_tokens = 0
             self._choices = []
@@ -92,14 +92,13 @@ def chat(version, environment, application_name,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version
                 )
             except Exception as e:
                 handle_exception(self._span, e)
-                logger.error('Error in trace creation: %s', e)
+                logger.error("Error in trace creation: %s", e)
             raise
 
     def wrapper(wrapped, instance, args, kwargs):
@@ -108,12 +107,12 @@ def chat(version, environment, application_name,
         """
 
         # Check if streaming is enabled for the API call
-        streaming = kwargs.get('stream', False)
+        streaming = kwargs.get("stream", False)
 
-        server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
-        request_model = kwargs.get('model', 'jamba-1.5-mini')
+        server_address, server_port = set_server_address_and_port(instance, "api.ai21.com", 443)
+        request_model = kwargs.get("model", "jamba-1.5-mini")
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
         # pylint: disable=no-else-return
         if streaming:
@@ -127,30 +126,34 @@ def chat(version, environment, application_name,
         with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
             start_time = time.time()
             response = wrapped(*args, **kwargs)
-            response = process_chat_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
 
-            return response
+            try:
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
 
     return wrapper
 
 def chat_rag(version, environment, application_name,
-             tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -160,32 +163,36 @@ def chat_rag(version, environment, application_name,
         Wraps the GenAI function call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
-        request_model = kwargs.get('model', 'jamba-1.5-mini')
+        server_address, server_port = set_server_address_and_port(instance, "api.ai21.com", 443)
+        request_model = kwargs.get("model", "jamba-1.5-mini")
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
         with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
             start_time = time.time()
             response = wrapped(*args, **kwargs)
-            response = process_chat_rag_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
-
-            return response
+
+            try:
+                response = process_chat_rag_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
 
     return wrapper
--- openlit/instrumentation/ai21/async_ai21.py
+++ openlit/instrumentation/ai21/async_ai21.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvention
 logger = logging.getLogger(__name__)
 
 def async_chat(version, environment, application_name,
-               tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -46,9 +46,9 @@ def async_chat(version, environment, application_name,
             self._span = span
             self._span_name = span_name
             # Placeholder for aggregating streaming response
-            self._llmresponse = ''
-            self._response_id = ''
-            self._finish_reason = ''
+            self._llmresponse = ""
+            self._response_id = ""
+            self._finish_reason = ""
             self._input_tokens = 0
             self._output_tokens = 0
             self._choices = []
@@ -92,14 +92,13 @@ def async_chat(version, environment, application_name,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version
                 )
             except Exception as e:
                 handle_exception(self._span, e)
-                logger.error('Error in trace creation: %s', e)
+
             raise
 
     async def wrapper(wrapped, instance, args, kwargs):
@@ -108,12 +107,12 @@ def async_chat(version, environment, application_name,
         """
 
        # Check if streaming is enabled for the API call
-        streaming = kwargs.get('stream', False)
+        streaming = kwargs.get("stream", False)
 
-        server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
-        request_model = kwargs.get('model', 'jamba-1.5-mini')
+        server_address, server_port = set_server_address_and_port(instance, "api.ai21.com", 443)
+        request_model = kwargs.get("model", "jamba-1.5-mini")
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
         # pylint: disable=no-else-return
         if streaming:
@@ -127,30 +126,34 @@ def async_chat(version, environment, application_name,
         with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
             start_time = time.time()
             response = await wrapped(*args, **kwargs)
-            response = process_chat_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
 
-            return response
+            try:
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
 
     return wrapper
 
 def async_chat_rag(version, environment, application_name,
-                   tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+                   tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -160,32 +163,36 @@ def async_chat_rag(version, environment, application_name,
         Wraps the GenAI function call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
-        request_model = kwargs.get('model', 'jamba-1.5-mini')
+        server_address, server_port = set_server_address_and_port(instance, "api.ai21.com", 443)
+        request_model = kwargs.get("model", "jamba-1.5-mini")
 
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
         with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
             start_time = time.time()
             response = await wrapped(*args, **kwargs)
-            response = process_chat_rag_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
-
-            return response
+
+            try:
+                response = process_chat_rag_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
 
     return wrapper
--- openlit/instrumentation/ai21/utils.py
+++ openlit/instrumentation/ai21/utils.py
@@ -14,9 +14,7 @@ from openlit.__helpers import (
     general_tokens,
     extract_and_format_input,
     get_chat_model_cost,
-    handle_exception,
     create_metrics_attributes,
-    otel_event,
     concatenate_all_contents
 )
 from openlit.semcov import SemanticConvention
@@ -29,36 +27,38 @@ def setup_common_span_attributes(span, request_model, kwargs, tokens,
     """
 
     # Base attributes from SDK and operation settings.
-    span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
+    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
     span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
     span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_AI21)
     span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
     span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, kwargs.get('seed', ''))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, kwargs.get('frequency_penalty', 0.0))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get('max_tokens', -1))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, kwargs.get('presence_penalty', 0.0))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, kwargs.get('stop', []))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, kwargs.get('temperature', 0.4))
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, kwargs.get('top_p', 1.0))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, kwargs.get("seed", ""))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, kwargs.get("frequency_penalty", 0.0))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens", -1))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, kwargs.get("presence_penalty", 0.0))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, kwargs.get("stop", []))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature", 0.4))
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p", 1.0))
 
     # Add token-related attributes if available.
-    if 'finish_reason' in tokens:
-        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [tokens['finish_reason']])
-    if 'response_id' in tokens:
-        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, tokens['response_id'])
-    if 'input_tokens' in tokens:
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, tokens['input_tokens'])
-    if 'output_tokens' in tokens:
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, tokens['output_tokens'])
-    if 'total_tokens' in tokens:
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, tokens['total_tokens'])
+    if "finish_reason" in tokens:
+        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [tokens["finish_reason"]])
+    if "response_id" in tokens:
+        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, tokens["response_id"])
+    if "input_tokens" in tokens:
+        span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, tokens["input_tokens"])
+    if "output_tokens" in tokens:
+        span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, tokens["output_tokens"])
+    if "total_tokens" in tokens:
+        span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, tokens["total_tokens"])
 
     span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
     span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+
     # Environment and service identifiers.
     span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
     span.set_attribute(SERVICE_NAME, application_name)
+
     # Set any extra attributes passed in.
     for key, value in extra_attrs.items():
         span.set_attribute(key, value)
@@ -80,106 +80,15 @@ def record_common_metrics(metrics, application_name, environment, request_model,
         server_port=server_port,
         response_model=request_model,
     )
-    metrics['genai_client_usage_tokens'].record(input_tokens + output_tokens, attributes)
-    metrics['genai_client_operation_duration'].record(end_time - start_time, attributes)
+    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
     if include_tbt and tbt_value is not None:
-        metrics['genai_server_tbt'].record(tbt_value, attributes)
-    metrics['genai_server_ttft'].record(end_time - start_time, attributes)
-    metrics['genai_requests'].add(1, attributes)
-    metrics['genai_completion_tokens'].add(output_tokens, attributes)
-    metrics['genai_prompt_tokens'].add(input_tokens, attributes)
-    metrics['genai_cost'].record(cost, attributes)
-
-def emit_common_events(event_provider, choices, finish_reason, llmresponse, formatted_messages,
-                       capture_message_content, n):
-    """
-    Emit events common to both chat and chat rag operations.
-    """
-
-    if n > 1:
-        for choice in choices:
-            choice_event_body = {
-                'finish_reason': finish_reason,
-                'index': choice.get('index', 0),
-                'message': {
-                    **({'content': choice.get('message', {}).get('content', '')} if capture_message_content else {}),
-                    'role': choice.get('message', {}).get('role', 'assistant')
-                }
-            }
-            # If tool calls exist, emit an event for each tool call.
-            tool_calls = choice.get('message', {}).get('tool_calls')
-            if tool_calls:
-                for tool_call in tool_calls:
-                    choice_event_body['message'].update({
-                        'tool_calls': {
-                            'function': {
-                                'name': tool_call.get('function', {}).get('name', ''),
-                                'arguments': tool_call.get('function', {}).get('arguments', '')
-                            },
-                            'id': tool_call.get('id', ''),
-                            'type': tool_call.get('type', 'function')
-                        }
-                    })
-                    event = otel_event(
-                        name=SemanticConvention.GEN_AI_CHOICE,
-                        attributes={SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AI21},
-                        body=choice_event_body
-                    )
-                    event_provider.emit(event)
-            else:
-                event = otel_event(
-                    name=SemanticConvention.GEN_AI_CHOICE,
-                    attributes={SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AI21},
-                    body=choice_event_body
-                )
-                event_provider.emit(event)
-    else:
-        # Single choice case.
-        choice_event_body = {
-            'finish_reason': finish_reason,
-            'index': 0,
-            'message': {
-                **({'content': llmresponse} if capture_message_content else {}),
-                'role': 'assistant'
-            }
-        }
-        event = otel_event(
-            name=SemanticConvention.GEN_AI_CHOICE,
-            attributes={SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AI21},
-            body=choice_event_body
-        )
-        event_provider.emit(event)
-
-    # Emit additional role-based events (if formatted messages are available).
-    for role in ['user', 'system', 'assistant', 'tool']:
-        msg = formatted_messages.get(role, {})
-        if msg.get('content', ''):
-            event_body = {
-                **({'content': msg.get('content', '')} if capture_message_content else {}),
-                'role': msg.get('role', [])
-            }
-            # For assistant messages, attach tool call details if they exist.
-            if role == 'assistant' and choices:
-                tool_calls = choices[0].get('message', {}).get('tool_calls', [])
-                if tool_calls:
-                    event_body['tool_calls'] = {
-                        'function': {
-                            'name': tool_calls[0].get('function', {}).get('name', ''),
-                            'arguments': tool_calls[0].get('function', {}).get('arguments', '')
-                        },
-                        'id': tool_calls[0].get('id', ''),
-                        'type': 'function'
-                    }
-            if role == 'tool' and choices:
-                tool_calls = choices[0].get('message', {}).get('tool_calls', [])
-                if tool_calls:
-                    event_body['id'] = tool_calls[0].get('id', '')
-            event = otel_event(
-                name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
-                attributes={SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AI21},
-                body=event_body
-            )
-            event_provider.emit(event)
+        metrics["genai_server_tbt"].record(tbt_value, attributes)
+    metrics["genai_server_ttft"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
 
 def process_chunk(self, chunk):
     """
@@ -194,21 +103,20 @@ def process_chunk(self, chunk):
         self._ttft = calculate_ttft(self._timestamps, self._start_time)
 
     chunked = response_as_dict(chunk)
-    if (len(chunked.get('choices')) > 0 and
-        'delta' in chunked.get('choices')[0] and
-        'content' in chunked.get('choices')[0].get('delta')):
-        content = chunked.get('choices')[0].get('delta').get('content')
-        if content:
+    if (len(chunked.get("choices")) > 0 and
+        "delta" in chunked.get("choices")[0] and
+        "content" in chunked.get("choices")[0].get("delta")):
+        if content := chunked.get("choices")[0].get("delta").get("content"):
            self._llmresponse += content
-    if chunked.get('usage'):
-        self._input_tokens = chunked.get('usage').get('prompt_tokens')
-        self._output_tokens = chunked.get('usage').get('completion_tokens')
-        self._response_id = chunked.get('id')
-        self._choices += chunked.get('choices')
-        self._finish_reason = chunked.get('choices')[0].get('finish_reason')
+    if chunked.get("usage"):
+        self._input_tokens = chunked.get("usage").get("prompt_tokens")
+        self._output_tokens = chunked.get("usage").get("completion_tokens")
+        self._response_id = chunked.get("id")
+        self._choices += chunked.get("choices")
+        self._finish_reason = chunked.get("choices")[0].get("finish_reason")
 
 def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
-                      event_provider, capture_message_content, disable_metrics, version, is_stream):
+                      capture_message_content, disable_metrics, version, is_stream):
     """
     Process chat request and generate Telemetry.
     """
@@ -218,19 +126,19 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
         scope._tbt = calculate_tbt(scope._timestamps)
 
     # Extract and format input messages.
-    formatted_messages = extract_and_format_input(scope._kwargs.get('messages', ''))
+    formatted_messages = extract_and_format_input(scope._kwargs.get("messages", ""))
     prompt = concatenate_all_contents(formatted_messages)
-    request_model = scope._kwargs.get('model', 'jamba-1.5-mini')
+    request_model = scope._kwargs.get("model", "jamba-1.5-mini")
 
     # Calculate cost based on token usage.
     cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
     # Prepare tokens dictionary.
     tokens = {
-        'finish_reason': scope._finish_reason,
-        'response_id': scope._response_id,
-        'input_tokens': scope._input_tokens,
-        'output_tokens': scope._output_tokens,
-        'total_tokens': scope._input_tokens + scope._output_tokens,
+        "finish_reason": scope._finish_reason,
+        "response_id": scope._response_id,
+        "input_tokens": scope._input_tokens,
+        "output_tokens": scope._output_tokens,
+        "total_tokens": scope._input_tokens + scope._output_tokens,
     }
     extra_attrs = {
         SemanticConvention.GEN_AI_REQUEST_IS_STREAM: is_stream,
@@ -239,14 +147,13 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
         SemanticConvention.GEN_AI_SERVER_TBT: scope._tbt,
         SemanticConvention.GEN_AI_SERVER_TTFT: scope._ttft,
         SemanticConvention.GEN_AI_SDK_VERSION: version,
-        SemanticConvention.GEN_AI_OUTPUT_TYPE: 'text' if isinstance(scope._llmresponse, str) else 'json'
+        SemanticConvention.GEN_AI_OUTPUT_TYPE: "text" if isinstance(scope._llmresponse, str) else "json"
     }
     # Set span attributes.
     setup_common_span_attributes(scope._span, request_model, scope._kwargs, tokens,
                                  scope._server_port, scope._server_address, environment,
                                  application_name, extra_attrs)
 
-    # Optionally add events capturing the prompt and completion.
     if capture_message_content:
         scope._span.add_event(
             name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
@@ -257,11 +164,6 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
             attributes={SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse},
         )
 
-    # Emit events for each choice and message role.
-    n = scope._kwargs.get('n', 1)
-    emit_common_events(event_provider, scope._choices, scope._finish_reason, scope._llmresponse,
-                       formatted_messages, capture_message_content, n)
-
     scope._span.set_status(Status(StatusCode.OK))
 
     if not disable_metrics:
@@ -272,23 +174,23 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
                               include_tbt=True, tbt_value=scope._tbt)
 
 def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
-                                    event_provider, capture_message_content=False, disable_metrics=False, version=''):
+                                    capture_message_content=False, disable_metrics=False, version=""):
     """
     Process a streaming chat response and generate Telemetry.
     """
 
     common_chat_logic(self, pricing_info, environment, application_name, metrics,
-                      event_provider, capture_message_content, disable_metrics, version, is_stream=True)
+                      capture_message_content, disable_metrics, version, is_stream=True)
 
 def process_chat_response(response, request_model, pricing_info, server_port, server_address,
-                          environment, application_name, metrics, event_provider, start_time,
-                          span, capture_message_content=False, disable_metrics=False, version='1.0.0', **kwargs):
+                          environment, application_name, metrics, start_time,
+                          span, capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
     """
     Process a synchronous chat response and generate Telemetry.
     """
 
     # Create a generic scope object to hold telemetry data.
-    self = type('GenericScope', (), {})()
+    self = type("GenericScope", (), {})()
     response_dict = response_as_dict(response)
 
     # pylint: disable = no-member
@@ -297,113 +199,102 @@ def process_chat_response(response, request_model, pricing_info, server_port, se
 
     self._span = span
     # Concatenate content from all choices.
-    self._llmresponse = ''.join(
-        (choice.get('message', {}).get('content') or '')
-        for choice in response_dict.get('choices', [])
+    self._llmresponse = "".join(
+        (choice.get("message", {}).get("content") or "")
+        for choice in response_dict.get("choices", [])
     )
-    self._response_role = response_dict.get('message', {}).get('role', 'assistant')
-    self._input_tokens = response_dict.get('usage', {}).get('prompt_tokens', 0)
-    self._output_tokens = response_dict.get('usage', {}).get('completion_tokens', 0)
-    self._response_id = response_dict.get('id', '')
+    self._response_role = response_dict.get("message", {}).get("role", "assistant")
+    self._input_tokens = response_dict.get("usage", {}).get("prompt_tokens", 0)
+    self._output_tokens = response_dict.get("usage", {}).get("completion_tokens", 0)
+    self._response_id = response_dict.get("id", "")
     self._response_model = request_model
-    self._finish_reason = response_dict.get('choices', [{}])[0].get('finish_reason')
+    self._finish_reason = response_dict.get("choices", [{}])[0].get("finish_reason")
     self._timestamps = []
     self._ttft, self._tbt = self._end_time - self._start_time, 0
     self._server_address, self._server_port = server_address, server_port
     self._kwargs = kwargs
-    self._choices = response_dict.get('choices')
+    self._choices = response_dict.get("choices")
 
     common_chat_logic(self, pricing_info, environment, application_name, metrics,
-                      event_provider, capture_message_content, disable_metrics, version, is_stream=False)
+                      capture_message_content, disable_metrics, version, is_stream=False)
 
     return response
 
 def process_chat_rag_response(response, request_model, pricing_info, server_port, server_address,
-                              environment, application_name, metrics, event_provider, start_time,
-                              span, capture_message_content=False, disable_metrics=False, version='1.0.0', **kwargs):
+                              environment, application_name, metrics, start_time,
+                              span, capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
     """
     Process a chat response and generate Telemetry.
     """
     end_time = time.time()
     response_dict = response_as_dict(response)
-    try:
-        # Format input messages into a single prompt string.
-        messages_input = kwargs.get('messages', '')
-        formatted_messages = extract_and_format_input(messages_input)
-        prompt = concatenate_all_contents(formatted_messages)
-        input_tokens = general_tokens(prompt)
-
-        # Create tokens dict and RAG-specific extra attributes.
-        tokens = {'response_id': response_dict.get('id'), 'input_tokens': input_tokens}
-        extra_attrs = {
-            SemanticConvention.GEN_AI_REQUEST_IS_STREAM: False,
-            SemanticConvention.GEN_AI_SERVER_TTFT: end_time - start_time,
-            SemanticConvention.GEN_AI_SDK_VERSION: version,
-            SemanticConvention.GEN_AI_RAG_MAX_SEGMENTS: kwargs.get('max_segments', -1),
-            SemanticConvention.GEN_AI_RAG_STRATEGY: kwargs.get('retrieval_strategy', 'segments'),
-            SemanticConvention.GEN_AI_RAG_SIMILARITY_THRESHOLD: kwargs.get('retrieval_similarity_threshold', -1),
-            SemanticConvention.GEN_AI_RAG_MAX_NEIGHBORS: kwargs.get('max_neighbors', -1),
-            SemanticConvention.GEN_AI_RAG_FILE_IDS: str(kwargs.get('file_ids', '')),
-            SemanticConvention.GEN_AI_RAG_DOCUMENTS_PATH: kwargs.get('path', '')
-        }
-        # Set common span attributes.
-        setup_common_span_attributes(span, request_model, kwargs, tokens,
-                                     server_port, server_address, environment, application_name,
-                                     extra_attrs)
-
-        # Record the prompt event if requested.
-        if capture_message_content:
-            span.add_event(
-                name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
-                attributes={SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt},
-            )
-
-        output_tokens = 0
-        choices = response_dict.get('choices', [])
-        # Instead of adding a separate event per choice, we aggregate all completion content.
-        aggregated_completion = []
-        for i in range(kwargs.get('n', 1)):
-            # Get the response content from each choice and count tokens.
-            content = choices[i].get('content', '')
-            aggregated_completion.append(content)
-            output_tokens += general_tokens(content)
-            if kwargs.get('tools'):
-                span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALLS,
-                                   str(choices[i].get('message', {}).get('tool_calls')))
-            # Set output type based on actual content type.
-            if isinstance(content, str):
-                span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, 'text')
-            elif content is not None:
-                span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, 'json')
-
-        # Concatenate completion responses.
-        llmresponse = ''.join(aggregated_completion)
-        tokens['output_tokens'] = output_tokens
-        tokens['total_tokens'] = input_tokens + output_tokens
-
-        cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
-        span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, input_tokens + output_tokens)
-
-        span.set_status(Status(StatusCode.OK))
-        # Emit a single aggregated completion event.
-        if capture_message_content:
-            span.add_event(
-                name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
-                attributes={SemanticConvention.GEN_AI_CONTENT_COMPLETION: llmresponse},
-            )
-        # Emit the rest of the events (choice and role-based events) as before.
-        n = kwargs.get('n', 1)
-        emit_common_events(event_provider, choices, choices[0].get('finish_reason', ''),
-                           llmresponse, formatted_messages, capture_message_content, n)
-
-        if not disable_metrics:
-            record_common_metrics(metrics, application_name, environment, request_model,
-                                  server_address, server_port, start_time, end_time,
-                                  input_tokens, output_tokens, cost, include_tbt=False)
-        return response
-
-    except Exception as e:
-        handle_exception(span, e)
-        return response
+    # Format input messages into a single prompt string.
+    messages_input = kwargs.get("messages", "")
+    formatted_messages = extract_and_format_input(messages_input)
+    prompt = concatenate_all_contents(formatted_messages)
+    input_tokens = general_tokens(prompt)
+
+    # Create tokens dict and RAG-specific extra attributes.
+    tokens = {"response_id": response_dict.get("id"), "input_tokens": input_tokens}
+    extra_attrs = {
+        SemanticConvention.GEN_AI_REQUEST_IS_STREAM: False,
+        SemanticConvention.GEN_AI_SERVER_TTFT: end_time - start_time,
+        SemanticConvention.GEN_AI_SDK_VERSION: version,
+        SemanticConvention.GEN_AI_RAG_MAX_SEGMENTS: kwargs.get("max_segments", -1),
+        SemanticConvention.GEN_AI_RAG_STRATEGY: kwargs.get("retrieval_strategy", "segments"),
+        SemanticConvention.GEN_AI_RAG_SIMILARITY_THRESHOLD: kwargs.get("retrieval_similarity_threshold", -1),
+        SemanticConvention.GEN_AI_RAG_MAX_NEIGHBORS: kwargs.get("max_neighbors", -1),
+        SemanticConvention.GEN_AI_RAG_FILE_IDS: str(kwargs.get("file_ids", "")),
+        SemanticConvention.GEN_AI_RAG_DOCUMENTS_PATH: kwargs.get("path", "")
+    }
+    # Set common span attributes.
+    setup_common_span_attributes(span, request_model, kwargs, tokens,
+                                 server_port, server_address, environment, application_name,
+                                 extra_attrs)
+
+    if capture_message_content:
+        span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt},
+        )
+
+    output_tokens = 0
+    choices = response_dict.get("choices", [])
+    aggregated_completion = []
+    for i in range(kwargs.get("n", 1)):
+        # Get the response content from each choice and count tokens.
+        content = choices[i].get("content", "")
+        aggregated_completion.append(content)
+        output_tokens += general_tokens(content)
+        if kwargs.get("tools"):
+            span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALLS,
+                               str(choices[i].get("message", {}).get("tool_calls")))
+        # Set output type based on actual content type.
+        if isinstance(content, str):
+            span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text")
+        elif content is not None:
+            span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "json")
+
+    # Concatenate completion responses.
+    llmresponse = "".join(aggregated_completion)
+    tokens["output_tokens"] = output_tokens
+    tokens["total_tokens"] = input_tokens + output_tokens
+
+    cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+    span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+    span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+    span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
+
+    span.set_status(Status(StatusCode.OK))
+
+    if capture_message_content:
+        span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={SemanticConvention.GEN_AI_CONTENT_COMPLETION: llmresponse},
+        )
+
+    if not disable_metrics:
+        record_common_metrics(metrics, application_name, environment, request_model,
+                              server_address, server_port, start_time, end_time,
+                              input_tokens, output_tokens, cost, include_tbt=False)
+    return response
--- openlit-1.34.3.dist-info/METADATA
+++ openlit-1.34.4.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.3
+Version: 1.34.4
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
--- openlit-1.34.3.dist-info/RECORD
+++ openlit-1.34.4.dist-info/RECORD
@@ -14,10 +14,10 @@ openlit/guard/sensitive_topic.py,sha256=RgVw_laFERv0nNdzBsAd2_3yLomMOK-gVq-P7oj1
 openlit/guard/utils.py,sha256=6hE3rCRjFXYjKRQYUo8YsqUSlvod48nOWp8MwoQEYdw,7670
 openlit/instrumentation/ag2/__init__.py,sha256=KgyLJBmwAxRWu7Z0S8FDDK4TZ13EFoAAIalvG5Oq4wc,1839
 openlit/instrumentation/ag2/ag2.py,sha256=eNQziyeZl4396GsIp5qI1Dne2KcnQMmhftW7joKQvNU,6934
-openlit/instrumentation/ai21/__init__.py,sha256=QXMByKCUhFITUIwUR01m0Fjpr20txV_GWcRJ66dTu_Q,2703
-openlit/instrumentation/ai21/ai21.py,sha256=J1QlBg56LWhkiD9IDmu0tJMOGnqKz0AiHtrMnACjU0Y,6814
-openlit/instrumentation/ai21/async_ai21.py,sha256=fIIGrAjF2xfGlAJiW6itqL88PSaA9vxy7F8nFpyVjZg,6918
-openlit/instrumentation/ai21/utils.py,sha256=7PcxzAwrLNTF5df5SR0Mp6UAkLGTATWxYrKS98c9iig,19770
+openlit/instrumentation/ai21/__init__.py,sha256=U24XlK1aHX0zubyUyBY6PBCa59fwp5sU5f-VD1EkCjc,2583
+openlit/instrumentation/ai21/ai21.py,sha256=1fJ1MvVIRQG-gh5YXkDycuTriT7_VB77vjXDKb7GZY8,6965
+openlit/instrumentation/ai21/async_ai21.py,sha256=uUJUXCKJcokYi6fPfcgBABSMVLj9CQsWJySakiZcSiU,7003
+openlit/instrumentation/ai21/utils.py,sha256=TiJtzG6kcrTf4FMJkAeHYUwZxkjp6JS3xoM2qn4gw54,14215
 openlit/instrumentation/anthropic/__init__.py,sha256=QEsiwdxcQDzzlVYR4_x7KTdf0-UJDJt8FjwNQMspnxM,1929
 openlit/instrumentation/anthropic/anthropic.py,sha256=NxJJjhsu9sSFIlBp322olGkPlLt9Bn5sndaugYA68dE,5149
 openlit/instrumentation/anthropic/async_anthropic.py,sha256=ivJGygKWVTS2hWWX12_g1tiq-5mpeHXETZsWoFZL3UE,5235
@@ -131,7 +131,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=ptyo37PY-FHDx_PShEvbdns71cD4YvvXw15bCRXKCKM,13461
-openlit-1.34.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-openlit-1.34.3.dist-info/METADATA,sha256=8_jDnUBC1cxAr2DNwkg5IXbNQX2qru-_nC7OpwC6Jh8,23469
-openlit-1.34.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-openlit-1.34.3.dist-info/RECORD,,
+openlit-1.34.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.4.dist-info/METADATA,sha256=0EfABzey6V_Rxze1-maNqQN1Y9Vftu54Ix0-5t7vaNo,23469
+openlit-1.34.4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.4.dist-info/RECORD,,