openlit 1.33.11__py3-none-any.whl → 1.33.12__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that public registry.
- openlit/__helpers.py +73 -109
- openlit/instrumentation/ag2/__init__.py +14 -14
- openlit/instrumentation/ag2/ag2.py +11 -11
- openlit/instrumentation/ai21/__init__.py +18 -18
- openlit/instrumentation/ai21/ai21.py +13 -14
- openlit/instrumentation/ai21/async_ai21.py +13 -14
- openlit/instrumentation/ai21/utils.py +86 -84
- openlit/instrumentation/anthropic/__init__.py +16 -16
- openlit/instrumentation/anthropic/anthropic.py +60 -352
- openlit/instrumentation/anthropic/async_anthropic.py +61 -353
- openlit/instrumentation/anthropic/utils.py +251 -0
- openlit/instrumentation/ollama/utils.py +0 -1
- {openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/METADATA +1 -1
- {openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/RECORD +16 -15
- {openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
- {openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
openlit/instrumentation/anthropic/utils.py (new file)
@@ -0,0 +1,251 @@
+"""
+Anthropic OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    calculate_ttft,
+    response_as_dict,
+    calculate_tbt,
+    extract_and_format_input,
+    get_chat_model_cost,
+    create_metrics_attributes,
+    otel_event,
+    concatenate_all_contents
+)
+from openlit.semcov import SemanticConvetion
+
+def process_chunk(self, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    self._timestamps.append(end_time)
+
+    if len(self._timestamps) == 1:
+        # Calculate time to first chunk
+        self._ttft = calculate_ttft(self._timestamps, self._start_time)
+
+    chunked = response_as_dict(chunk)
+
+    # Collect message IDs and input token from events
+    if chunked.get('type') == 'message_start':
+        self._response_id = chunked.get('message').get('id')
+        self._input_tokens = chunked.get('message').get('usage').get('input_tokens')
+        self._response_model = chunked.get('message').get('model')
+        self._response_role = chunked.get('message').get('role')
+
+    # Collect message IDs and aggregated response from events
+    if chunked.get('type') == 'content_block_delta':
+        if chunked.get('delta').get('text'):
+            self._llmresponse += chunked.get('delta').get('text')
+        elif chunked.get('delta').get('partial_json'):
+            self._tool_arguments += chunked.get('delta').get('partial_json')
+
+    if chunked.get('type') == 'content_block_start':
+        if chunked.get('content_block').get('id'):
+            self._tool_id = chunked.get('content_block').get('id')
+        if chunked.get('content_block').get('name'):
+            self._tool_name = chunked.get('content_block').get('name')
+
+    # Collect output tokens and stop reason from events
+    if chunked.get('type') == 'message_delta':
+        self._output_tokens = chunked.get('usage').get('output_tokens')
+        self._finish_reason = chunked.get('delta').get('stop_reason')
+
+def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+    event_provider, capture_message_content, disable_metrics, version, is_stream):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    scope._end_time = time.time()
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    formatted_messages = extract_and_format_input(scope._kwargs.get('messages', ''))
+    request_model = scope._kwargs.get('model', 'claude-3-opus-20240229')
+
+    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+    # Set Span attributes (OTel Semconv)
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get('max_tokens', -1))
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop_sequences', []))
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+    scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
+
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+        'text' if isinstance(scope._llmresponse, str) else 'json')
+
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+
+    # To be removed once the change to log events (from span events) is complete
+    prompt = concatenate_all_contents(formatted_messages)
+    if capture_message_content:
+        scope._span.add_event(
+            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    choice_event_body = {
+        'finish_reason': scope._finish_reason,
+        'index': 0,
+        'message': {
+            **({'content': scope._llmresponse} if capture_message_content else {}),
+            'role': scope._response_role
+        }
+    }
+
+    if scope._tool_calls:
+        choice_event_body['message'].update({
+            'tool_calls': {
+                'function': {
+                    'name': scope._tool_calls.get('name', ''),
+                    'arguments': scope._tool_calls.get('input', '')
+                },
+                'id': scope._tool_calls.get('id', ''),
+                'type': 'function'
+            }
+        })
+
+    # Emit events
+    for role in ['user', 'system', 'assistant', 'tool']:
+        if formatted_messages.get(role, {}).get('content', ''):
+            event = otel_event(
+                name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+                attributes={
+                    SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC
+                },
+                body={
+                    # pylint: disable=line-too-long
+                    **({'content': formatted_messages.get(role, {}).get('content', '')} if capture_message_content else {}),
+                    'role': formatted_messages.get(role, {}).get('role', []),
+                    **({
+                        'tool_calls': {
+                            'function': {
+                                # pylint: disable=line-too-long
+                                'name': (scope._tool_calls[0].get('function', {}).get('name', '') if scope._tool_calls else ''),
+                                'arguments': (scope._tool_calls[0].get('function', {}).get('arguments', '') if scope._tool_calls else '')
+                            },
+                            'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else ''),
+                            'type': 'function'
+                        }
+                    } if role == 'assistant' else {}),
+                    **({
+                        'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else '')
+                    } if role == 'tool' else {})
+                }
+            )
+            event_provider.emit(event)
+
+    choice_event = otel_event(
+        name=SemanticConvetion.GEN_AI_CHOICE,
+        attributes={
+            SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC
+        },
+        body=choice_event_body
+    )
+    event_provider.emit(choice_event)
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    if not disable_metrics:
+        metrics_attributes = create_metrics_attributes(
+            service_name=application_name,
+            deployment_environment=environment,
+            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+            system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
+            request_model=request_model,
+            server_address=scope._server_address,
+            server_port=scope._server_port,
+            response_model=scope._response_model,
+        )
+
+        metrics['genai_client_usage_tokens'].record(scope._input_tokens + scope._output_tokens, metrics_attributes)
+        metrics['genai_client_operation_duration'].record(scope._end_time - scope._start_time, metrics_attributes)
+        metrics['genai_server_tbt'].record(scope._tbt, metrics_attributes)
+        metrics['genai_server_ttft'].record(scope._ttft, metrics_attributes)
+        metrics['genai_requests'].add(1, metrics_attributes)
+        metrics['genai_completion_tokens'].add(scope._output_tokens, metrics_attributes)
+        metrics['genai_prompt_tokens'].add(scope._input_tokens, metrics_attributes)
+        metrics['genai_cost'].record(cost, metrics_attributes)
+
+def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
+    event_provider, capture_message_content=False, disable_metrics=False, version=''):
+    """
+    Process chat request and generate Telemetry
+    """
+    if self._tool_id != '':
+        self._tool_calls = {
+            'id': self._tool_id,
+            'name': self._tool_name,
+            'input': self._tool_arguments
+        }
+
+    common_chat_logic(self, pricing_info, environment, application_name, metrics,
+        event_provider, capture_message_content, disable_metrics, version, is_stream=True)
+
+def process_chat_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, event_provider, start_time,
+    span, capture_message_content=False, disable_metrics=False, version='1.0.0', **kwargs):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    self = type('GenericScope', (), {})()
+    response_dict = response_as_dict(response)
+
+    # pylint: disable = no-member
+    self._start_time = start_time
+    self._end_time = time.time()
+    self._span = span
+    self._llmresponse = response_dict.get('content', {})[0].get('text', '')
+    self._response_role = response_dict.get('message', {}).get('role', 'assistant')
+    self._input_tokens = response_dict.get('usage').get('input_tokens')
+    self._output_tokens = response_dict.get('usage').get('output_tokens')
+    self._response_model = response_dict.get('model', '')
+    self._finish_reason = response_dict.get('stop_reason', '')
+    self._response_id = response_dict.get('id', '')
+    self._timestamps = []
+    self._ttft, self._tbt = self._end_time - self._start_time, 0
+    self._server_address, self._server_port = server_address, server_port
+    self._kwargs = kwargs
+    # pylint: disable=line-too-long
+    self._tool_calls = (lambda c: c[1] if len(c) > 1 and c[1].get('type') == 'tool_use' else None)(response_dict.get('content', []))
+
+    common_chat_logic(self, pricing_info, environment, application_name, metrics,
+        event_provider, capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response
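These helpers keep all per-request state on a scope object rather than returning values: process_chunk() accumulates stream state onto it, and common_chat_logic() reads that state back when building the span, events, and metrics. The stream wrappers in anthropic.py and async_anthropic.py are not shown in this diff, so the sketch below is only an illustration of the state contract they would have to satisfy; the class name and constructor are hypothetical, not the package's real wrapper.

import time

class _StreamScope:
    """Hypothetical state holder listing the attributes that process_chunk()
    and common_chat_logic() read and mutate; illustrative only."""

    def __init__(self, span, server_address, server_port, **kwargs):
        self._span = span
        self._kwargs = kwargs                # original messages.create() arguments
        self._server_address = server_address
        self._server_port = server_port
        self._start_time = time.time()
        self._end_time = None                # set by common_chat_logic()
        self._timestamps = []                # one entry per chunk, drives TTFT/TBT
        self._ttft = 0
        self._tbt = 0
        self._llmresponse = ''               # aggregated content_block_delta text
        self._tool_id = ''
        self._tool_name = ''
        self._tool_arguments = ''            # aggregated partial_json deltas
        self._tool_calls = None
        self._response_id = ''
        self._response_model = ''
        self._response_role = ''
        self._input_tokens = 0
        self._output_tokens = 0
        self._finish_reason = ''

Each event from the Anthropic stream would then be passed through process_chunk(scope, chunk), and process_streaming_chat_response(scope, ...) would run once after the stream is exhausted.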
{openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.33.11
+Version: 1.33.12
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/RECORD
@@ -1,4 +1,4 @@
-openlit/__helpers.py,sha256=…
+openlit/__helpers.py,sha256=TBFkzFFAOWLy2YjMOU--VMtQGb9kspSiJRwk4Twts3g,10675
 openlit/__init__.py,sha256=87uE_wi6YF2FKkmVy0VLmRKeNMuJ9e6XrycIbad9T6A,23755
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
@@ -12,15 +12,16 @@ openlit/guard/prompt_injection.py,sha256=3e4DKxB7QDzM-xPCpwEuureiH_2s_OTJ9BSckkn
 openlit/guard/restrict_topic.py,sha256=KTuWa7XeMsV4oXxOrD1CYZV0wXWxTfA0H3p_6q_IOsk,6444
 openlit/guard/sensitive_topic.py,sha256=RgVw_laFERv0nNdzBsAd2_3yLomMOK-gVq-P7oj1bTk,5552
 openlit/guard/utils.py,sha256=x0-_hAtNa_ogYR2GfnwiBF1rlqaXtaJ-rJeGguTDe-Q,7663
-openlit/instrumentation/ag2/__init__.py,sha256=…
-openlit/instrumentation/ag2/ag2.py,sha256=…
-openlit/instrumentation/ai21/__init__.py,sha256=…
-openlit/instrumentation/ai21/ai21.py,sha256=…
-openlit/instrumentation/ai21/async_ai21.py,sha256=…
-openlit/instrumentation/ai21/utils.py,sha256=…
-openlit/instrumentation/anthropic/__init__.py,sha256=…
-openlit/instrumentation/anthropic/anthropic.py,sha256=…
-openlit/instrumentation/anthropic/async_anthropic.py,sha256=…
+openlit/instrumentation/ag2/__init__.py,sha256=KgyLJBmwAxRWu7Z0S8FDDK4TZ13EFoAAIalvG5Oq4wc,1839
+openlit/instrumentation/ag2/ag2.py,sha256=TFqYLyHxGjXMR4s8xoBdysTjZaQYZd8tbf_srw5RYUw,6908
+openlit/instrumentation/ai21/__init__.py,sha256=QXMByKCUhFITUIwUR01m0Fjpr20txV_GWcRJ66dTu_Q,2703
+openlit/instrumentation/ai21/ai21.py,sha256=j18n7X1gL15aZlYvqd64r_6hjhiPg7KHyQwQwzyYutg,6811
+openlit/instrumentation/ai21/async_ai21.py,sha256=Qn4Z0M2TZRN4fkgcy3D4DwIUAqYj_1HKztapVbgf3kw,6915
+openlit/instrumentation/ai21/utils.py,sha256=nLN4VuczcP6x0mq7tAgBkVxjt1ZY8hFMZ1KFUdayQx0,19705
+openlit/instrumentation/anthropic/__init__.py,sha256=QEsiwdxcQDzzlVYR4_x7KTdf0-UJDJt8FjwNQMspnxM,1929
+openlit/instrumentation/anthropic/anthropic.py,sha256=mZbNepBzOtQBCGJCDWaVTpXukii6jjt_fgSOY1RfMoU,5147
+openlit/instrumentation/anthropic/async_anthropic.py,sha256=DBHFdGKBhnI21Tuh_430kc-Zjaq54yUvfqmCmb1EQqg,5233
+openlit/instrumentation/anthropic/utils.py,sha256=nfIOV7eTtAReNN5QN0oG6NaqftulGjbgMrALOaDHQ8A,11897
 openlit/instrumentation/assemblyai/__init__.py,sha256=AS6tEzcyEG7RP6bNDW_Kf4_rN-u-O1BNjJ3khX3AEUs,1565
 openlit/instrumentation/assemblyai/assemblyai.py,sha256=18AQ7wrigCZd9RXwNZ36mn9fc3M3p2WkAHlT_sD5M3c,6292
 openlit/instrumentation/astra/__init__.py,sha256=mEHT_p4q3bl9IiXAFqutGEvw37k6CCuWbmlI9Gx3xKY,8314
@@ -89,7 +90,7 @@ openlit/instrumentation/multion/multion.py,sha256=SrO25cv7dob1H4r00MsAa13Bj-WNsE
 openlit/instrumentation/ollama/__init__.py,sha256=JjxSqEegmRoRqIVz7ZAq9dLyXPZ2DqV2wGmgXCENNpw,3004
 openlit/instrumentation/ollama/async_ollama.py,sha256=LhDQPy3wLyNO9JWksUEeCx-DK9oIV3K98Cgwnp4RfKg,6538
 openlit/instrumentation/ollama/ollama.py,sha256=wVyaX0quoiiCj1J3tyTiQx5Du5CmaWmt9e_lpCr7s6A,6434
-openlit/instrumentation/ollama/utils.py,sha256=…
+openlit/instrumentation/ollama/utils.py,sha256=zXsWNqfnZLssrcb-GNbWeZeqTKVzQb1bes8vzgl-gbQ,14549
 openlit/instrumentation/openai/__init__.py,sha256=dfgMBHd2wAT24uckVBBqTy7pzN34ESzeymKzkUy6t58,4893
 openlit/instrumentation/openai/async_openai.py,sha256=8-I6SMT1bFxNGLf2GW1Yfpny-lCvIcR5coKvytKL_yE,50635
 openlit/instrumentation/openai/openai.py,sha256=t1-9KY1CCqeRno2_gXd3PxFkWm1ySwFweno4st2T3bE,50469
@@ -119,7 +120,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=URL7gzQbnxaNQJSX7oHRa15v6xi1GFmANn-5uFNL-aY,6378
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=kUd-ZSmXkXBo-osVve4ce_XEgr0fgEN7nXxoNm7kfEQ,12798
-openlit-1.33.11.dist-info/LICENSE,sha256=…
-openlit-1.33.11.dist-info/METADATA,sha256=…
-openlit-1.33.11.dist-info/WHEEL,sha256=…
-openlit-1.33.11.dist-info/RECORD,,
+openlit-1.33.12.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.33.12.dist-info/METADATA,sha256=t2L1uO5RcNpbAuY5qKha7WQWoB6TCT6L0sc0jG1K5cY,23512
+openlit-1.33.12.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+openlit-1.33.12.dist-info/RECORD,,
{openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/LICENSE: file without changes
{openlit-1.33.11.dist-info → openlit-1.33.12.dist-info}/WHEEL: file without changes
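A note on reading the RECORD entries above: each row is path,hash,size, where the hash is 'sha256=' followed by the URL-safe base64 encoding of the file's raw SHA-256 digest with the trailing '=' padding stripped, and the size is in bytes (the standard wheel RECORD format). A generic verification sketch, not part of openlit:

import base64
import hashlib

def record_hash(path):
    """Compute a wheel-RECORD-style hash string ('sha256=...') for a file."""
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    # RECORD uses the urlsafe base64 of the raw digest, '=' padding stripped
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

# For the 1.33.12 wheel, record_hash('openlit/__helpers.py') should match the
# RECORD entry above: sha256=TBFkzFFAOWLy2YjMOU--VMtQGb9kspSiJRwk4Twts3g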