tokenr-ruby 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/tokenr/integrations/openai.rb +23 -25
- data/lib/tokenr/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 123b21c4a01928c071ed6342f10febbc57d10a48103463f2cef0c0a9b596915f
+  data.tar.gz: '008e8aee3a0ef09e2e0cdfb3dcf077716daa34d8d95c153a2ff3eed2f3381cbc'
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 38b6597c2dfb33c03438d2e225c7e0380bfad3870d91fe918a24ef0c61cdfd7fe4ba0fd775c788064eec8929ffbdaead10882402cb7e6249ff4ebb8246bbce64
+  data.tar.gz: 5b0782571b01576d11cd81d9b662a4b2df398d382fa595098446bb5765dac9b4963d68bc2ed35b3a5a623fc878a33225398d44995d3db4640eb00aba9300aef8
data/lib/tokenr/integrations/openai.rb
CHANGED
@@ -34,33 +34,39 @@ module Tokenr
     end
 
     def chat(parameters:)
-
-
-
+      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      response = client.chat(parameters: parameters)
+      latency = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000).round
+      track_chat_response(parameters[:model], response, latency)
+      response
     end
 
     def completions(parameters:)
-
-
-
+      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      response = client.completions(parameters: parameters)
+      latency = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000).round
+      track_chat_response(parameters[:model], response, latency)
+      response
     end
 
     def embeddings(parameters:)
-
-
-
-
+      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      response = client.embeddings(parameters: parameters)
+      latency = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000).round
+      usage = response.dig("usage")
+      if usage
         Tokenr.track(
-          model:
-          provider:
-          input_tokens:
+          model: parameters[:model],
+          provider: "openai",
+          input_tokens: usage["prompt_tokens"] || 0,
           output_tokens: 0,
-          latency_ms:
-          agent_id:
-          feature_name:
-          tags:
+          latency_ms: latency,
+          agent_id: agent_id,
+          feature_name: feature_name,
+          tags: tags
         )
       end
+      response
     end
 
     # Pass any other methods straight through to the underlying client.
@@ -74,14 +80,6 @@ module Tokenr
 
     private
 
-    def timed
-      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
-      response = yield
-      latency = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000).round
-      yield response, latency if block_given?
-      response
-    end
-
     def track_chat_response(model, response, latency_ms)
       usage = response.dig("usage")
       return unless usage
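For readers skimming the diff: the practical effect of this change is that every chat, completions, and embeddings call is now timed with a monotonic clock and reported through Tokenr.track before the raw response is returned, replacing the removed private timed helper. A minimal usage sketch follows; the wrapper class name, its constructor, and the agent_id/feature_name/tags options are assumptions inferred from the keyword arguments visible above (the gem's initialization API is not part of this diff), while chat(parameters:) and the Tokenr.track keywords are taken directly from the code shown.

# Hypothetical usage sketch -- only chat/completions/embeddings(parameters:) and
# the Tokenr.track keyword arguments are confirmed by the diff; the class name,
# constructor, and require paths below are assumptions.
require "tokenr"   # assumed require path for the tokenr-ruby gem
require "openai"   # ruby-openai gem providing OpenAI::Client

raw_client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

# Assumed wrapper construction; agent_id, feature_name, and tags are the values
# the instrumented methods forward to Tokenr.track.
client = Tokenr::Integrations::OpenAI.new(
  raw_client,
  agent_id: "support-bot",        # hypothetical identifier
  feature_name: "ticket-triage",  # hypothetical feature label
  tags: { env: "production" }     # hypothetical tags
)

# As of 0.1.1 this call is timed with Process::CLOCK_MONOTONIC, usage is
# reported via track_chat_response, and the provider's raw response hash is
# returned unchanged.
response = client.chat(
  parameters: {
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Hello" }]
  }
)
puts response.dig("choices", 0, "message", "content")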
data/lib/tokenr/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: tokenr-ruby
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Tokenr
@@ -50,7 +50,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version:
+rubygems_version: 4.0.6
 specification_version: 4
 summary: Automatic LLM cost tracking for OpenAI, Anthropic, and more
 test_files: []