openlit 1.33.12__py3-none-any.whl → 1.33.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +2 -18
- openlit/instrumentation/openai/async_openai.py +3 -5
- openlit/instrumentation/openai/openai.py +3 -5
- {openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/METADATA +1 -2
- {openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/RECORD +7 -7
- {openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/LICENSE +0 -0
- {openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/WHEEL +0 -0
openlit/__helpers.py
CHANGED
@@ -7,8 +7,8 @@ import json
 import logging
 from urllib.parse import urlparse
 from typing import Any, Dict, List, Tuple
+import math
 import requests
-import tiktoken
 from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.trace import Status, StatusCode
 from opentelemetry._events import Event
@@ -45,28 +45,12 @@ def get_env_variable(name, arg_value, error_message):
         raise RuntimeError(error_message)
     return value

-def openai_tokens(text, model):
-    """
-    Calculate the number of tokens a given text would take up for a specified model.
-    """
-
-    try:
-        encoding = tiktoken.encoding_for_model(model)
-    except:
-        encoding = tiktoken.get_encoding("cl100k_base")
-
-    num_tokens = len(encoding.encode(text))
-    return num_tokens
-
 def general_tokens(text):
     """
     Calculate the number of tokens a given text would take up.
     """

-
-
-    num_tokens = len(encoding.encode(text))
-    return num_tokens
+    return math.ceil(len(text) / 2)

 def get_chat_model_cost(model, pricing_info, prompt_tokens, completion_tokens):
     """
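The net effect of the __helpers.py change is that token counting no longer depends on tiktoken: `general_tokens` now estimates roughly two characters per token. A minimal sketch of the new behaviour, using an illustrative string rather than anything from the package:

```python
import math


def general_tokens(text):
    """Approximate token count as introduced in openlit 1.33.13."""
    # Roughly two characters per token, rounded up; no tokenizer required.
    return math.ceil(len(text) / 2)


# Illustrative only: 38 characters -> 19 approximate tokens.
print(general_tokens("Explain OpenTelemetry in one sentence."))
```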
openlit/instrumentation/openai/async_openai.py
CHANGED
@@ -11,7 +11,7 @@ from openlit.__helpers import (
     get_embed_model_cost,
     get_audio_model_cost,
     get_image_model_cost,
-    openai_tokens,
+    general_tokens,
     handle_exception,
     response_as_dict,
     calculate_ttft,
@@ -151,10 +151,8 @@ def async_chat_completions(version, environment, application_name,
 request_model = self._kwargs.get("model", "gpt-4o")

 # Calculate tokens using input prompt and aggregated response
-input_tokens = openai_tokens(prompt,
-request_model)
-output_tokens = openai_tokens(self._llmresponse,
-request_model)
+input_tokens = general_tokens(prompt)
+output_tokens = general_tokens(self._llmresponse)

 # Calculate cost of the operation
 cost = get_chat_model_cost(request_model,
openlit/instrumentation/openai/openai.py
CHANGED
@@ -11,7 +11,7 @@ from openlit.__helpers import (
     get_embed_model_cost,
     get_audio_model_cost,
     get_image_model_cost,
-    openai_tokens,
+    general_tokens,
     handle_exception,
     response_as_dict,
     calculate_ttft,
@@ -151,10 +151,8 @@ def chat_completions(version, environment, application_name,
 request_model = self._kwargs.get("model", "gpt-4o")

 # Calculate tokens using input prompt and aggregated response
-input_tokens = openai_tokens(prompt,
-request_model)
-output_tokens = openai_tokens(self._llmresponse,
-request_model)
+input_tokens = general_tokens(prompt)
+output_tokens = general_tokens(self._llmresponse)

 # Calculate cost of the operation
 cost = get_chat_model_cost(request_model,
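Both the sync and async chat-completion wrappers now derive usage counts from the raw prompt text and the aggregated response text, so the model name is only needed for pricing. A hedged sketch of the updated call-site logic; `prompt`, `llm_response`, and the surrounding variables are placeholders, not the instrumentation's real state:

```python
import math


def general_tokens(text):
    # Same heuristic as openlit/__helpers.py in 1.33.13.
    return math.ceil(len(text) / 2)


# Placeholder values; the real wrapper reads these from the intercepted
# OpenAI request and the streamed/aggregated response.
prompt = "What is observability?"
llm_response = "Observability means inferring internal state from telemetry."
request_model = "gpt-4o"  # default the wrapper falls back to

# Replaces the per-model openai_tokens(...) calls removed in this release.
input_tokens = general_tokens(prompt)
output_tokens = general_tokens(llm_response)

# get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
# then receives these approximate counts; pricing_info is supplied by the SDK
# and is not reproduced here.
print(input_tokens, output_tokens)
```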
{openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.33.12
+Version: 1.33.13
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -25,7 +25,6 @@ Requires-Dist: opentelemetry-sdk (>=1.27.0,<2.0.0)
 Requires-Dist: pydantic (>=2.0.0,<3.0.0)
 Requires-Dist: requests (>=2.26.0,<3.0.0)
 Requires-Dist: schedule (>=1.2.2,<2.0.0)
-Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
 Requires-Dist: xmltodict (>=0.13.0,<0.14.0)
 Project-URL: Homepage, https://github.com/openlit/openlit/tree/main/openlit/python
 Project-URL: Repository, https://github.com/openlit/openlit/tree/main/openlit/python
{openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-openlit/__helpers.py,sha256=
+openlit/__helpers.py,sha256=9K9nz_RunwtnFeAk591uOJZiY3J88HsYv7T2H8elHWA,10262
 openlit/__init__.py,sha256=87uE_wi6YF2FKkmVy0VLmRKeNMuJ9e6XrycIbad9T6A,23755
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
@@ -92,8 +92,8 @@ openlit/instrumentation/ollama/async_ollama.py,sha256=LhDQPy3wLyNO9JWksUEeCx-DK9
 openlit/instrumentation/ollama/ollama.py,sha256=wVyaX0quoiiCj1J3tyTiQx5Du5CmaWmt9e_lpCr7s6A,6434
 openlit/instrumentation/ollama/utils.py,sha256=zXsWNqfnZLssrcb-GNbWeZeqTKVzQb1bes8vzgl-gbQ,14549
 openlit/instrumentation/openai/__init__.py,sha256=dfgMBHd2wAT24uckVBBqTy7pzN34ESzeymKzkUy6t58,4893
-openlit/instrumentation/openai/async_openai.py,sha256=
-openlit/instrumentation/openai/openai.py,sha256=
+openlit/instrumentation/openai/async_openai.py,sha256=DtY26K-mNXqVAIGb_md3V3nVnGzNDn82anw9qzA3RKo,50500
+openlit/instrumentation/openai/openai.py,sha256=79ydZrzRDd_44_3WtZ7U68I3HMvTkKysKglyMhjPcNQ,50334
 openlit/instrumentation/phidata/__init__.py,sha256=tqls5-UI6FzbjxYgq_qqAfALhWJm8dHn2NtgqiQA4f8,1557
 openlit/instrumentation/phidata/phidata.py,sha256=-BU_g3FpGcttOt-W-QIER5qquCRORob2UFLdaOW3F_s,4819
 openlit/instrumentation/pinecone/__init__.py,sha256=0guSEPmObaZiOF8yHExpOGY-qW_egHXfZGog3rKGi8M,2596
@@ -120,7 +120,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=URL7gzQbnxaNQJSX7oHRa15v6xi1GFmANn-5uFNL-aY,6378
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=kUd-ZSmXkXBo-osVve4ce_XEgr0fgEN7nXxoNm7kfEQ,12798
-openlit-1.33.
-openlit-1.33.
-openlit-1.33.
-openlit-1.33.
+openlit-1.33.13.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.33.13.dist-info/METADATA,sha256=qzSNDWBO7S4WOizt3IIR8LSzPDvmttzZggT7OfSqHpU,23471
+openlit-1.33.13.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+openlit-1.33.13.dist-info/RECORD,,
{openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/LICENSE
File without changes
{openlit-1.33.12.dist-info → openlit-1.33.13.dist-info}/WHEEL
File without changes