openlit 1.16.0__py3-none-any.whl → 1.16.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/anthropic/anthropic.py +2 -2
- openlit/instrumentation/anthropic/async_anthropic.py +2 -2
- openlit/instrumentation/cohere/cohere.py +2 -2
- openlit/instrumentation/groq/async_groq.py +10 -10
- openlit/instrumentation/groq/groq.py +10 -10
- openlit/instrumentation/mistral/async_mistral.py +4 -4
- openlit/instrumentation/mistral/mistral.py +4 -4
- openlit/instrumentation/openai/async_azure_openai.py +12 -12
- openlit/instrumentation/openai/async_openai.py +10 -10
- openlit/instrumentation/openai/azure_openai.py +12 -12
- openlit/instrumentation/openai/openai.py +10 -10
- openlit/instrumentation/transformers/transformers.py +1 -1
- openlit/semcov/__init__.py +3 -3
- {openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/METADATA +2 -2
- {openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/RECORD +17 -17
- {openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/LICENSE +0 -0
- {openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/WHEEL +0 -0
openlit/instrumentation/anthropic/anthropic.py
CHANGED
@@ -120,7 +120,7 @@ def messages(gen_ai_endpoint, version, environment, application_name, tracer,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "claude-3-sonnet-20240229"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
     True)
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
@@ -224,7 +224,7 @@ def messages(gen_ai_endpoint, version, environment, application_name, tracer,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "claude-3-sonnet-20240229"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
     False)
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
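Every `max_tokens` hunk in this release follows the same pattern: the default passed to `kwargs.get` becomes the sentinel `-1`, so the span attribute is always a concrete integer even when the caller never supplied the argument. A minimal sketch of the pattern, assuming only the `opentelemetry-api` package (the tracer name and `record_request` helper are illustrative, not openlit's actual API):

```python
# Sketch of the sentinel-default pattern; the helper name is hypothetical.
from opentelemetry import trace

tracer = trace.get_tracer("sentinel-sketch")

def record_request(span, **kwargs):
    # -1 means "max_tokens was not supplied"; the attribute is still a
    # well-typed int, so exporters never see a missing or malformed value.
    span.set_attribute("gen_ai.request.max_tokens",
                       kwargs.get("max_tokens", -1))

with tracer.start_as_current_span("anthropic.messages") as span:
    record_request(span, model="claude-3-sonnet-20240229")
    # max_tokens was omitted above, so the span records -1 rather than failing.
```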
openlit/instrumentation/anthropic/async_anthropic.py
CHANGED
@@ -120,7 +120,7 @@ def async_messages(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "claude-3-sonnet-20240229"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
     True)
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
@@ -224,7 +224,7 @@ def async_messages(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "claude-3-sonnet-20240229"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
     False)
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
openlit/instrumentation/cohere/cohere.py
CHANGED
@@ -193,7 +193,7 @@ def chat(gen_ai_endpoint, version, environment, application_name, tracer,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.3))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
@@ -336,7 +336,7 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.3))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
openlit/instrumentation/groq/async_groq.py
CHANGED
@@ -119,15 +119,15 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -221,17 +221,17 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "llama3-8b-8192"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("name", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/groq/groq.py
CHANGED
@@ -119,15 +119,15 @@ def chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -221,17 +221,17 @@ def chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "llama3-8b-8192"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("name", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
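A plausible reading of the `1 → 1.0` and `0 → 0.0` changes repeated above (the diff itself does not state the motivation): OpenTelemetry attribute values are typed, so an integer default records `top_p` as an int when the caller omits it but as a double when the caller passes, say, `0.9`. Float defaults keep the recorded type stable either way. A short sketch under that assumption:

```python
# Sketch: float defaults keep OTel attribute types consistent.
from opentelemetry import trace

tracer = trace.get_tracer("typed-attrs-sketch")

with tracer.start_as_current_span("groq.chat") as span:
    kwargs = {}  # the caller omitted every sampling parameter
    # With the old int defaults these would be recorded as integers here,
    # but as doubles whenever a caller passed an explicit float value.
    span.set_attribute("gen_ai.request.top_p", kwargs.get("top_p", 1.0))
    span.set_attribute("gen_ai.request.temperature", kwargs.get("temperature", 1.0))
    span.set_attribute("gen_ai.request.presence_penalty", kwargs.get("presence_penalty", 0.0))
    span.set_attribute("gen_ai.request.frequency_penalty", kwargs.get("frequency_penalty", 0.0))
```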
openlit/instrumentation/mistral/async_mistral.py
CHANGED
@@ -95,9 +95,9 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.7))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("random_seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -252,9 +252,9 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.7))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("random_seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/mistral/mistral.py
CHANGED
@@ -94,9 +94,9 @@ def chat(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.7))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("random_seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -251,9 +251,9 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
     kwargs.get("temperature", 0.7))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("random_seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/openai/async_azure_openai.py
CHANGED
@@ -125,11 +125,11 @@ def azure_async_chat_completions(gen_ai_endpoint, version, environment, applicat
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -228,11 +228,11 @@ def azure_async_chat_completions(gen_ai_endpoint, version, environment, applicat
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -425,11 +425,11 @@ def azure_async_completions(gen_ai_endpoint, version, environment, application_n
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -509,11 +509,11 @@ def azure_async_completions(gen_ai_endpoint, version, environment, application_n
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/openai/async_openai.py
CHANGED
@@ -122,15 +122,15 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -224,17 +224,17 @@ def async_chat_completions(gen_ai_endpoint, version, environment, application_na
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "gpt-3.5-turbo"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/openai/azure_openai.py
CHANGED
@@ -125,11 +125,11 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -228,11 +228,11 @@ def azure_chat_completions(gen_ai_endpoint, version, environment, application_na
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -423,11 +423,11 @@ def azure_completions(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -507,11 +507,11 @@ def azure_completions(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOOL_CHOICE,
     kwargs.get("tool_choice", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/openai/openai.py
CHANGED
@@ -123,15 +123,15 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
@@ -225,17 +225,17 @@ def chat_completions(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
     kwargs.get("model", "gpt-3.5-turbo"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-    kwargs.get("top_p", 1))
+    kwargs.get("top_p", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    kwargs.get("max_tokens",
+    kwargs.get("max_tokens", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_USER,
     kwargs.get("user", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-    kwargs.get("temperature", 1))
+    kwargs.get("temperature", 1.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-    kwargs.get("presence_penalty", 0))
+    kwargs.get("presence_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-    kwargs.get("frequency_penalty", 0))
+    kwargs.get("frequency_penalty", 0.0))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SEED,
     kwargs.get("seed", ""))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
openlit/instrumentation/transformers/transformers.py
CHANGED
@@ -87,7 +87,7 @@ def text_wrap(gen_ai_endpoint, version, environment, application_name,
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
     forward_params.get("top_p", "null"))
 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-    forward_params.get("max_length",
+    forward_params.get("max_length", -1))
 span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_PROMPT,
     prompt)
 if trace_content:
openlit/semcov/__init__.py
CHANGED
@@ -59,14 +59,14 @@ class SemanticConvetion:
     GEN_AI_REQUEST_IMAGE_STYLE = "gen_ai.request.image_style"

     # GenAI Usage
-    GEN_AI_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens"
-    GEN_AI_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens"
+    GEN_AI_USAGE_PROMPT_TOKENS = "gen_ai.usage.input_tokens"
+    GEN_AI_USAGE_COMPLETION_TOKENS = "gen_ai.usage.output_tokens"
     GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
     GEN_AI_USAGE_COST = "gen_ai.usage.cost"

     # GenAI Response
     GEN_AI_RESPONSE_ID = "gen_ai.response.id"
-    GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reason"
+    GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reasons"
     GEN_AI_RESPONSE_IMAGE = "gen_ai.response.image" # Not used directly in code yet

     # GenAI Content
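The `semcov` renames align openlit with the OpenTelemetry GenAI semantic conventions, which use `gen_ai.usage.input_tokens`, `gen_ai.usage.output_tokens`, and `gen_ai.response.finish_reasons`; any dashboard or backend query keyed to the previous attribute strings needs the new names. A quick check against 1.16.1 (note that `SemanticConvetion` is spelled this way in the source):

```python
# Verifies the renamed attribute keys shipped in openlit 1.16.1.
from openlit.semcov import SemanticConvetion

assert SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS == "gen_ai.usage.input_tokens"
assert SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS == "gen_ai.usage.output_tokens"
assert SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON == "gen_ai.response.finish_reasons"
print("semcov attribute keys match the OTel GenAI conventions")
```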
{openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.16.0
+Version: 1.16.1
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
@@ -21,7 +21,7 @@ Requires-Dist: opentelemetry-instrumentation (>=0.45b0,<0.46)
 Requires-Dist: opentelemetry-sdk (>=1.24.0,<2.0.0)
 Requires-Dist: requests (>=2.26.0,<3.0.0)
 Requires-Dist: schedule (>=1.2.2,<2.0.0)
-Requires-Dist: tiktoken (>=0.
+Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
 Project-URL: Repository, https://github.com/openlit/openlit/tree/main/openlit/python
 Description-Content-Type: text/markdown

{openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 openlit/__helpers.py,sha256=lrn4PBs9owDudiCY2NBoVbAi7AU_HtUpyOj0oqPBsPY,5545
 openlit/__init__.py,sha256=eJKH1Op7wzBsuoBYuM_C022Jo7cCtRQBJxf2lpDfe_o,14981
 openlit/instrumentation/anthropic/__init__.py,sha256=oaU53BOPyfUKbEzYvLr1DPymDluurSnwo4Hernf2XdU,1955
-openlit/instrumentation/anthropic/anthropic.py,sha256=
-openlit/instrumentation/anthropic/async_anthropic.py,sha256=
+openlit/instrumentation/anthropic/anthropic.py,sha256=AkQUmi_VtDzFKOMobQw6LTw_CwC27E0r_--7FnwCJ3A,16026
+openlit/instrumentation/anthropic/async_anthropic.py,sha256=l-AjpkxllWAXTlv9rOm61Ktbei3csvWQPFWw7FvI6Cg,16068
 openlit/instrumentation/bedrock/__init__.py,sha256=QPvDMQde6Meodu5JvosHdZsnyExS19lcoP5Li4YrOkw,1540
 openlit/instrumentation/bedrock/bedrock.py,sha256=SsN1SFWFn7P84Z6irH_8OLY2mOctWsBG82f-cnroOhU,22276
 openlit/instrumentation/chroma/__init__.py,sha256=61lFpHlUEQUobsUJZHXdvOViKwsOH8AOvSfc4VgCmiM,3253
 openlit/instrumentation/chroma/chroma.py,sha256=E80j_41UeZi8RzTsHbpvi1izOA_n-0-3_VdrA68AJPA,10531
 openlit/instrumentation/cohere/__init__.py,sha256=PC5T1qIg9pwLNocBP_WjG5B_6p_z019s8quk_fNLAMs,1920
-openlit/instrumentation/cohere/cohere.py,sha256=
+openlit/instrumentation/cohere/cohere.py,sha256=_FXytRRfRuHwNK-PME_X9LZIQjQ0Uq7QQa1Vq_y8NNY,20437
 openlit/instrumentation/elevenlabs/__init__.py,sha256=BZjAe-kzFJpKxT0tKksXVfZgirvgEp8qM3SfegWU5co,2631
 openlit/instrumentation/elevenlabs/async_elevenlabs.py,sha256=aDbSV5rXx-ZpBMea5DLERQDGW7uoegLMszhy-x3A1lw,5543
 openlit/instrumentation/elevenlabs/elevenlabs.py,sha256=AbMThG8edI778Dv85jtdUY2YkXD6s5auozXxH03iTvY,5942
@@ -18,8 +18,8 @@ openlit/instrumentation/gpt4all/__init__.py,sha256=-59CP2B3-HGZJ_vC-fI9Dt-0BuQXR
 openlit/instrumentation/gpt4all/gpt4all.py,sha256=iDu8CAat4j5VPAlhIdkGOclZvhFPG-u7zKwadsKeJps,17948
 openlit/instrumentation/gpu/__init__.py,sha256=Dj2MLar0DB20-t6W3pfR-3jfR_mwg4SYwhzIrH_n9sU,5596
 openlit/instrumentation/groq/__init__.py,sha256=uW_0G6HSanQyK2dIXYhzR604pDiyPQfybzc37DsfSew,1911
-openlit/instrumentation/groq/async_groq.py,sha256=
-openlit/instrumentation/groq/groq.py,sha256=
+openlit/instrumentation/groq/async_groq.py,sha256=AiKx_f4wuJqiiI6hhu7qbKsOXOWzLug3R0QMkZHfC10,19092
+openlit/instrumentation/groq/groq.py,sha256=jJVGpc5DlQ5xD7FgDaG20pV3qfFzbdpjUf6LdULpaJg,19056
 openlit/instrumentation/haystack/__init__.py,sha256=QK6XxxZUHX8vMv2Crk7rNBOc64iOOBLhJGL_lPlAZ8s,1758
 openlit/instrumentation/haystack/haystack.py,sha256=oQIZiDhdp3gnJnhYQ1OouJMc9YT0pQ-_31cmNuopa68,3891
 openlit/instrumentation/langchain/__init__.py,sha256=19C7YGSF-6u5VlvKkThNS4zZqvxw-fQfRsKufZ9onfk,2881
@@ -29,29 +29,29 @@ openlit/instrumentation/llamaindex/llamaindex.py,sha256=uiIigbwhonSbJWA7LpgOVI1R
 openlit/instrumentation/milvus/__init__.py,sha256=qi1yfmMrvkDtnrN_6toW8qC9BRL78bq7ayWpObJ8Bq4,2961
 openlit/instrumentation/milvus/milvus.py,sha256=qhKIoggBAJhRctRrBYz69AcvXH-eh7oBn_l9WfxpAjI,9121
 openlit/instrumentation/mistral/__init__.py,sha256=zJCIpFWRbsYrvooOJYuqwyuKeSOQLWbyXWCObL-Snks,3156
-openlit/instrumentation/mistral/async_mistral.py,sha256=
-openlit/instrumentation/mistral/mistral.py,sha256=
+openlit/instrumentation/mistral/async_mistral.py,sha256=uv5P5ow6b78QWJidIXY3Sl6X8re09ITtLRdji2L97Dw,21365
+openlit/instrumentation/mistral/mistral.py,sha256=-uLlPPl3U3670DWUBetLkoYvT83eJlHPnLxXjr5qI7M,21216
 openlit/instrumentation/ollama/__init__.py,sha256=cOax8PiypDuo_FC4WvDCYBRo7lH5nV9xU92h7k-eZbg,3812
 openlit/instrumentation/ollama/async_ollama.py,sha256=ESk1zZTj2hPmkWIH5F2owuoo0apleDSSx5VORlO3e3w,28991
 openlit/instrumentation/ollama/ollama.py,sha256=PLGF9RB3TRNZ9GSGqeGVvKFBtgUK8Hc8xwvk-3NPeGI,28901
 openlit/instrumentation/openai/__init__.py,sha256=AZ2cPr3TMKkgGdMl_yXMeSi7bWhtmMqOW1iHdzHHGHA,16265
-openlit/instrumentation/openai/async_azure_openai.py,sha256=
-openlit/instrumentation/openai/async_openai.py,sha256=
-openlit/instrumentation/openai/azure_openai.py,sha256=
-openlit/instrumentation/openai/openai.py,sha256=
+openlit/instrumentation/openai/async_azure_openai.py,sha256=Y0HIFwCZ6EAIQ8DgwSkVvDSGd53oohWla00T6tw3BrQ,46302
+openlit/instrumentation/openai/async_openai.py,sha256=mzMz2j6hYK-mxIgI2fSEoYUPAZroHkv_6pTBI1fRu2c,45844
+openlit/instrumentation/openai/azure_openai.py,sha256=XUf5eLR1_ggpoWSC46vTBkKXiyNd-_fRxx70XGCHj2M,46096
+openlit/instrumentation/openai/openai.py,sha256=TWr0U6bZrgurrm5rM7EUgmEk-L_pxIGd_LPTq2K9SUE,46525
 openlit/instrumentation/pinecone/__init__.py,sha256=Mv9bElqNs07_JQkYyNnO0wOM3hdbprmw7sttdMeKC7g,2526
 openlit/instrumentation/pinecone/pinecone.py,sha256=0EhLmtOuvwWVvAKh3e56wyd8wzQq1oaLOmF15SVHxVE,8765
 openlit/instrumentation/qdrant/__init__.py,sha256=OJIg17-IGmBEvBYVKjCHcJ0hFXuEL7XV_jzUTqkolN8,4799
 openlit/instrumentation/qdrant/qdrant.py,sha256=4uHKYGvWQtRAEVLUWo3o4joJw7hFm2NxVuBu5YKZKiI,14456
 openlit/instrumentation/transformers/__init__.py,sha256=4GBtjzcJU4XiPexIUYEqF3pNZMeQw4Gm5B-cyumaFjs,1468
-openlit/instrumentation/transformers/transformers.py,sha256=
+openlit/instrumentation/transformers/transformers.py,sha256=HCpG-gC5W9F2ekbol3HsuNjXb4jrM_D7YLtHDlV4STc,7604
 openlit/instrumentation/vertexai/__init__.py,sha256=N3E9HtzefD-zC0fvmfGYiDmSqssoavp_i59wfuYLyMw,6079
 openlit/instrumentation/vertexai/async_vertexai.py,sha256=PMHYyLf1J4gZpC_-KZ_ZVx1xIHhZDJSNa7mrjNXZ5M0,52372
 openlit/instrumentation/vertexai/vertexai.py,sha256=UvpNKBHPoV9idVMfGigZnmWuEQiyqSwZn0zK9-U7Lzw,52125
 openlit/otel/metrics.py,sha256=O7NoaDz0bY19mqpE4-0PcKwEe-B-iJFRgOCaanAuZAc,4291
 openlit/otel/tracing.py,sha256=vL1ifMbARPBpqK--yXYsCM6y5dSu5LFIKqkhZXtYmUc,3712
-openlit/semcov/__init__.py,sha256=
-openlit-1.16.0.dist-info/LICENSE,sha256=
-openlit-1.16.0.dist-info/METADATA,sha256=
-openlit-1.16.0.dist-info/WHEEL,sha256=
-openlit-1.16.0.dist-info/RECORD,,
+openlit/semcov/__init__.py,sha256=KIKPDAXA29wu6XmHEfDprvlbvf83FJaprsCIbfChfBs,7341
+openlit-1.16.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.16.1.dist-info/METADATA,sha256=ubp77Y4gtEqfXi6bkDizYsV10mFcFGTWFlnsoby905U,14120
+openlit-1.16.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+openlit-1.16.1.dist-info/RECORD,,
{openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/LICENSE
File without changes
{openlit-1.16.0.dist-info → openlit-1.16.1.dist-info}/WHEEL
File without changes