tokenr-ruby 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: ce5be4fe9dded47d914a3e7e348b86b807d90a53b75d2e875c7a63e24d52344b
4
+ data.tar.gz: 8b36455f2b65965afd9b304e88c467117b0e3d14abbdf3e3e05449ec34092a69
5
+ SHA512:
6
+ metadata.gz: aeec3f6ee5c391a14f503c666989cd14bb62e1693dcd320a2b695a9f0fbed01c73107909f60a88b4191ce6421308cc375b1ed295e7095e4a8f3f8b3829bcf158
7
+ data.tar.gz: bc7c5d3133f07e146020eb3946a96956563003a068746296537259ba2361e769aab4bb8b39efeb92f601c6c0278c43155a86599846119770c09feba88e1bcf60
data/CHANGELOG.md ADDED
@@ -0,0 +1,15 @@
1
+ # Changelog
2
+
3
+ All notable changes to the Tokenr Ruby SDK will be documented here.
4
+
5
+ ## [0.1.0] - 2025-02-22
6
+
7
+ ### Added
8
+ - Initial release
9
+ - `Tokenr.configure` block-style configuration
10
+ - `Tokenr.track` for manual tracking
11
+ - `Tokenr.track_batch` for bulk tracking
12
+ - `Tokenr::Integrations::OpenAI.wrap` — auto-tracks chat, completions, and embeddings
13
+ - `Tokenr::Integrations::Anthropic.wrap` — auto-tracks messages
14
+ - Async batched delivery via background `Thread` — zero runtime dependencies
15
+ - `Tokenr.costs` and related query helpers
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Tokenr
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,231 @@
1
+ # tokenr Ruby SDK
2
+
3
+ **Automatic LLM cost tracking in a few lines of code.**
4
+
5
+ Track costs from OpenAI, Anthropic, and other LLM providers with minimal code changes. Get real-time visibility into spending by agent, feature, team, or any dimension you need.
6
+
7
+ [![Gem Version](https://badge.fury.io/rb/tokenr-ruby.svg)](https://rubygems.org/gems/tokenr-ruby)
8
+
9
+ ## Features
10
+
11
+ - **Minimal setup** — configure once, wrap your client, you're done
12
+ - **Async by default** — batches requests in a background thread; never adds latency
13
+ - **Multi-provider** — OpenAI and Anthropic today; manual tracking for anything else
14
+ - **Rich attribution** — agent, feature, team, and custom tags per request
15
+ - **Production-ready** — tracking failures are silent; your app always runs
16
+
17
+ ## Installation
18
+
19
+ ```ruby
20
+ # Gemfile
21
+ gem "tokenr-ruby"
22
+ ```
23
+
24
+ ```bash
25
+ bundle install
26
+ ```
27
+
28
+ Or install directly:
29
+
30
+ ```bash
31
+ gem install tokenr-ruby
32
+ ```
33
+
34
+ > The gem is named `tokenr-ruby` on RubyGems. Once installed, you `require "tokenr"` as normal.
35
+
36
+ ## Quickstart
37
+
38
+ ### OpenAI
39
+
40
+ ```ruby
41
+ require "openai"
42
+ require "tokenr"
43
+
44
+ Tokenr.configure do |c|
45
+ c.api_key = ENV["TOKENR_TOKEN"]
46
+ c.agent_id = "my-app" # optional default
47
+ end
48
+
49
+ client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])
50
+ tracked = Tokenr::Integrations::OpenAI.wrap(client)
51
+
52
+ response = tracked.chat(parameters: {
53
+ model: "gpt-4o",
54
+ messages: [{ role: "user", content: "Hello!" }]
55
+ })
56
+ # Cost is automatically tracked to Tokenr
57
+ ```
58
+
59
+ ### Anthropic
60
+
61
+ ```ruby
62
+ require "anthropic"
63
+ require "tokenr"
64
+
65
+ Tokenr.configure do |c|
66
+ c.api_key = ENV["TOKENR_TOKEN"]
67
+ end
68
+
69
+ client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
70
+ tracked = Tokenr::Integrations::Anthropic.wrap(client)
71
+
72
+ response = tracked.messages(
73
+ model: "claude-opus-4-5",
74
+ max_tokens: 1024,
75
+ messages: [{ role: "user", content: "Hello!" }]
76
+ )
77
+ # Automatically tracked!
78
+ ```
79
+
80
+ ## Configuration
81
+
82
+ ### Environment Variables
83
+
84
+ ```bash
85
+ export TOKENR_TOKEN="your-token"
86
+ ```
87
+
88
+ ```ruby
89
+ Tokenr.configure do |c|
90
+ c.api_key = ENV["TOKENR_TOKEN"] # optional — TOKENR_TOKEN is read from the environment automatically if unset
91
+ end
92
+ ```
93
+
94
+ ### All Options
95
+
96
+ ```ruby
97
+ Tokenr.configure do |c|
98
+ c.api_key = ENV["TOKENR_TOKEN"] # required
99
+ c.agent_id = "my-app" # default agent ID for all requests
100
+ c.team_id = nil # default team ID
101
+ c.default_tags = { environment: "prod" } # merged into every request
102
+ c.async = true # send in background (recommended)
103
+ c.batch_size = 100 # flush after this many queued events
104
+ c.flush_interval = 5 # flush every N seconds
105
+ end
106
+ ```
107
+
108
+ ### Disable in Development
109
+
110
+ ```ruby
111
+ Tokenr.configure do |c|
112
+ c.api_key = ENV["TOKENR_TOKEN"]
113
+ c.async = ENV["RAILS_ENV"] == "production"
114
+ end
115
+ ```
116
+
117
+ ## Advanced Usage
118
+
119
+ ### Track by Agent
120
+
121
+ ```ruby
122
+ # Option 1: default at configure time
123
+ Tokenr.configure { |c| c.agent_id = "support-bot" }
124
+
125
+ # Option 2: per-wrapper
126
+ tracked = Tokenr::Integrations::OpenAI.wrap(client, agent_id: "sales-bot")
127
+ ```
128
+
129
+ ### Track by Feature
130
+
131
+ ```ruby
132
+ tracked = Tokenr::Integrations::OpenAI.wrap(client,
133
+ agent_id: "support-bot",
134
+ feature_name: "ticket-summary"
135
+ )
136
+ ```
137
+
138
+ ### Multi-Tenant Tracking
139
+
140
+ ```ruby
141
+ # Wrap with a team_id to roll up costs per customer/team
142
+ def ai_client_for(team)
143
+ Tokenr::Integrations::OpenAI.wrap(
144
+ base_client,
145
+ agent_id: "shared-bot",
146
+ tags: { team_id: team.id, plan: team.plan }
147
+ )
148
+ end
149
+ ```
150
+
151
+ ### Custom Tags
152
+
153
+ ```ruby
154
+ tracked = Tokenr::Integrations::Anthropic.wrap(client,
155
+ tags: { customer_id: "cust_123", language: "es" }
156
+ )
157
+ ```
158
+
159
+ ### Manual Tracking
160
+
161
+ For providers without a built-in integration, or when you want explicit control:
162
+
163
+ ```ruby
164
+ Tokenr.track(
165
+ provider: "cohere",
166
+ model: "command-r-plus",
167
+ input_tokens: 1200,
168
+ output_tokens: 400,
169
+ agent_id: "research-bot",
170
+ feature_name: "summarization",
171
+ latency_ms: 320
172
+ )
173
+ ```
174
+
175
+ ### Querying Costs
176
+
177
+ ```ruby
178
+ # Costs for the last 7 days
179
+ Tokenr.costs(start_date: 7.days.ago.iso8601, end_date: Time.now.iso8601)
180
+
181
+ # Grouped by agent
182
+ Tokenr.client.get_costs_by_agent(limit: 20)
183
+
184
+ # Time-series
185
+ Tokenr.client.get_timeseries(interval: "day")
186
+ ```
187
+
188
+ ## How It Works
189
+
190
+ 1. `Tokenr::Integrations::OpenAI.wrap(client)` returns a thin wrapper around your existing client
191
+ 2. After each call the wrapper reads token counts from the response `usage` field
192
+ 3. Events are pushed onto an in-process queue and flushed to Tokenr in the background
193
+ 4. If tracking fails for any reason, the exception is swallowed — your app is unaffected
194
+ 5. On process exit, `at_exit` flushes any remaining queued events
195
+
196
+ ## Supported Providers
197
+
198
+ | Provider | Auto-Tracking | Manual Tracking |
199
+ |-----------|:-------------:|:---------------:|
200
+ | OpenAI | Yes | Yes |
201
+ | Anthropic | Yes | Yes |
202
+ | Cohere | Coming soon | Yes |
203
+ | Custom | — | Yes |
204
+
205
+ ## Getting Your API Token
206
+
207
+ 1. Sign up at [tokenr.co](https://tokenr.co)
208
+ 2. Go to **API Tokens** and create a token
209
+ 3. Copy it — shown only once
210
+
211
+ ```bash
212
+ export TOKENR_TOKEN="your-token-here"
213
+ ```
214
+
215
+ ## Security
216
+
217
+ This SDK is open source so you can audit exactly what data is sent and when. The short version:
218
+
219
+ - Only token counts, model names, and your attribution metadata are transmitted
220
+ - No prompt content or response content ever leaves your application
221
+ - All requests use HTTPS
222
+ - Tracking runs on a background thread and cannot block your main thread
223
+
224
+ ## License
225
+
226
+ MIT — see [LICENSE.txt](LICENSE.txt)
227
+
228
+ ## Support
229
+
230
+ - Issues: [github.com/tokenr-co/tokenr-ruby/issues](https://github.com/tokenr-co/tokenr-ruby/issues)
231
+ - Email: support@tokenr.co
@@ -0,0 +1,113 @@
1
# frozen_string_literal: true

require "net/http"
require "json"
require "uri"

module Tokenr
  # Thin HTTP client for the Tokenr REST API.
  #
  # Every public method validates the configuration first (raising
  # ConfigurationError when no API key is set), performs one HTTPS
  # request, and returns the parsed JSON body with symbolized keys.
  class Client
    attr_reader :config

    # @param config [Tokenr::Configuration] supplies api_key, api_base, etc.
    def initialize(config)
      @config = config
    end

    # Record a single usage event.
    def track(data)
      config.validate!
      post("/api/v1/track", data)
    end

    # Record many usage events in one API call.
    def track_batch(requests)
      config.validate!
      post("/api/v1/track/batch", { requests: requests })
    end

    # Query aggregated costs. All filters are optional and omitted from
    # the query string when nil.
    def get_costs(start_date: nil, end_date: nil, agent_id: nil, model: nil, group_by: nil)
      config.validate!
      params = {}
      params[:start_date] = start_date if start_date
      params[:end_date] = end_date if end_date
      params[:agent_id] = agent_id if agent_id
      params[:model] = model if model
      params[:group_by] = group_by if group_by
      get("/api/v1/costs", params)
    end

    # Costs rolled up per agent.
    def get_costs_by_agent(limit: 50)
      config.validate!
      get("/api/v1/costs/agents", { limit: limit })
    end

    # Costs rolled up per model.
    def get_costs_by_model
      config.validate!
      get("/api/v1/costs/models")
    end

    # Time-series cost data; extra keyword filters pass straight through.
    def get_timeseries(interval: "day", **filters)
      config.validate!
      get("/api/v1/costs/timeseries", filters.merge(interval: interval))
    end

    # Metadata about the API token in use.
    def token_info
      config.validate!
      get("/api/v1/tokens/me")
    end

    private

    def post(path, body)
      request(:post, path, body)
    end

    def get(path, params = {})
      request(:get, path, nil, params)
    end

    def request(method, path, body = nil, params = {})
      uri = build_uri(path, params)
      http = Net::HTTP.new(uri.host, uri.port)
      http.use_ssl = uri.scheme == "https"
      http.open_timeout = 5
      http.read_timeout = 30

      req = build_request(method, uri, body)
      handle_response(http.request(req))
    end

    def build_uri(path, params = {})
      uri = URI.join(config.api_base, path)
      uri.query = URI.encode_www_form(params) if params.any?
      uri
    end

    def build_request(method, uri, body)
      req = case method
            when :post then Net::HTTP::Post.new(uri)
            when :get then Net::HTTP::Get.new(uri)
            else raise ArgumentError, "Unsupported HTTP method: #{method}"
            end

      req["Authorization"] = "Bearer #{config.api_key}"
      req["Content-Type"] = "application/json"
      req["User-Agent"] = "tokenr-ruby/#{VERSION}"
      req.body = body.to_json if body
      req
    end

    # Map HTTP status codes to return values / errors.
    #
    # Fix: tolerate empty success bodies (204 / blank 200) and non-JSON
    # 422 bodies instead of leaking JSON::ParserError to the caller.
    def handle_response(response)
      case response.code.to_i
      when 200..299
        parse_body(response.body)
      when 401
        raise ApiError, "Unauthorized: invalid API key"
      when 403
        raise ApiError, "Forbidden: insufficient token permissions"
      when 422
        data = parse_body(response.body) || {}
        raise ApiError, "Validation error: #{data[:error]}"
      else
        raise ApiError, "API error (#{response.code}): #{response.body}"
      end
    end

    # Returns nil for blank or non-JSON bodies; parsed hash otherwise.
    def parse_body(body)
      return nil if body.nil? || body.empty?

      JSON.parse(body, symbolize_names: true)
    rescue JSON::ParserError
      nil
    end
  end
end
@@ -0,0 +1,27 @@
1
# frozen_string_literal: true

module Tokenr
  # Runtime settings for the SDK, normally populated via Tokenr.configure.
  # api_key and api_base fall back to the TOKENR_TOKEN / TOKENR_API_BASE
  # environment variables when not set explicitly.
  class Configuration
    attr_accessor :api_key, :api_base, :agent_id, :team_id, :default_tags,
                  :async, :batch_size, :flush_interval

    def initialize
      @api_key        = ENV["TOKENR_TOKEN"]
      @api_base       = ENV["TOKENR_API_BASE"] || "https://tokenr.co"
      @agent_id       = nil
      @team_id        = nil
      @default_tags   = {}
      @async          = true
      @batch_size     = 100
      @flush_interval = 5
    end

    # True when an API key is present and non-blank.
    def valid?
      !(api_key.nil? || api_key.empty?)
    end

    # Raises ConfigurationError unless #valid?.
    def validate!
      return if valid?

      raise ConfigurationError, "API key is required. Set TOKENR_TOKEN or pass api_key to Tokenr.configure."
    end
  end
end
@@ -0,0 +1,79 @@
1
# frozen_string_literal: true

module Tokenr
  module Integrations
    # Wrap an Anthropic client to automatically track costs.
    #
    # Usage:
    #   require "anthropic"
    #   require "tokenr"
    #
    #   Tokenr.configure { |c| c.api_key = ENV["TOKENR_TOKEN"] }
    #
    #   client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
    #   tracked = Tokenr::Integrations::Anthropic.wrap(client, agent_id: "my-bot")
    #
    #   response = tracked.messages(
    #     model: "claude-opus-4-5",
    #     max_tokens: 1024,
    #     messages: [{ role: "user", content: "Hello!" }]
    #   )
    #   # Cost is tracked automatically — no other changes needed.
    #
    module Anthropic
      # Build a tracking wrapper around an existing Anthropic client.
      def self.wrap(client, agent_id: nil, feature_name: nil, tags: {})
        Wrapper.new(client, agent_id: agent_id, feature_name: feature_name, tags: tags)
      end

      # Proxy that intercepts +messages+ for timing/tracking and forwards
      # every other call untouched to the wrapped client.
      class Wrapper
        attr_reader :client, :agent_id, :feature_name, :tags

        def initialize(client, agent_id: nil, feature_name: nil, tags: {})
          @client = client
          @agent_id = agent_id
          @feature_name = feature_name
          @tags = tags
        end

        # Call the underlying messages API, then record token usage and
        # wall-clock latency. Always returns the provider's response.
        def messages(model:, messages:, **params)
          started_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          response = client.messages(model: model, messages: messages, **params)
          elapsed_ms = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - started_at) * 1000).round

          track_response(model, response, elapsed_ms)
          response
        end

        # Transparent delegation for any method we do not intercept.
        def method_missing(method, *args, **kwargs, &block)
          client.send(method, *args, **kwargs, &block)
        end

        def respond_to_missing?(method, include_private = false)
          client.respond_to?(method, include_private)
        end

        private

        # Push one usage event to Tokenr. Any failure here is swallowed so
        # tracking can never break the caller's request.
        def track_response(model, response, latency_ms)
          usage = response.usage
          return unless usage

          Tokenr.track(
            model: model,
            provider: "anthropic",
            input_tokens: usage.input_tokens || 0,
            output_tokens: usage.output_tokens || 0,
            latency_ms: latency_ms,
            agent_id: agent_id,
            feature_name: feature_name,
            tags: tags
          )
        rescue StandardError
          # Never let tracking errors surface to the caller
        end
      end
    end
  end
end
@@ -0,0 +1,103 @@
1
# frozen_string_literal: true

module Tokenr
  module Integrations
    # Wrap an OpenAI client to automatically track costs.
    #
    # Usage:
    #   require "openai"
    #   require "tokenr"
    #
    #   Tokenr.configure { |c| c.api_key = ENV["TOKENR_TOKEN"] }
    #
    #   client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])
    #   tracked = Tokenr::Integrations::OpenAI.wrap(client, agent_id: "my-bot")
    #
    #   response = tracked.chat(parameters: { model: "gpt-4o", messages: [...] })
    #   # Cost is tracked automatically — no other changes needed.
    #
    module OpenAI
      class << self
        # Build a tracking wrapper around an existing OpenAI client.
        def wrap(client, agent_id: nil, feature_name: nil, tags: {})
          Wrapper.new(client, agent_id: agent_id, feature_name: feature_name, tags: tags)
        end
      end

      # Proxy that intercepts chat/completions/embeddings for tracking and
      # forwards everything else untouched to the wrapped client.
      class Wrapper
        attr_reader :client, :agent_id, :feature_name, :tags

        def initialize(client, agent_id: nil, feature_name: nil, tags: {})
          @client = client
          @agent_id = agent_id
          @feature_name = feature_name
          @tags = tags
        end

        # NOTE(fix): the original passed two literal blocks to `timed`
        # (`timed { ... } do ... end`), which is a Ruby syntax error, and
        # `timed` yielded the same block twice. The API call is now passed
        # as a lambda and the literal block receives (response, latency_ms).
        def chat(parameters:)
          timed(-> { client.chat(parameters: parameters) }) do |response, latency_ms|
            track_chat_response(parameters[:model], response, latency_ms)
          end
        end

        def completions(parameters:)
          timed(-> { client.completions(parameters: parameters) }) do |response, latency_ms|
            track_chat_response(parameters[:model], response, latency_ms)
          end
        end

        def embeddings(parameters:)
          timed(-> { client.embeddings(parameters: parameters) }) do |response, latency_ms|
            usage = response.dig("usage")
            next unless usage

            Tokenr.track(
              model: parameters[:model],
              provider: "openai",
              input_tokens: usage["prompt_tokens"] || 0,
              output_tokens: 0, # embeddings have no completion tokens
              latency_ms: latency_ms,
              agent_id: agent_id,
              feature_name: feature_name,
              tags: tags
            )
          end
        end

        # Pass any other methods straight through to the underlying client.
        def method_missing(method, *args, **kwargs, &block)
          client.send(method, *args, **kwargs, &block)
        end

        def respond_to_missing?(method, include_private = false)
          client.respond_to?(method, include_private)
        end

        private

        # Run +operation+ (a callable performing the API request), measure
        # wall-clock latency, and yield (response, latency_ms) to the
        # tracking block. Tracking errors are swallowed — consistent with
        # the Anthropic wrapper — so they never break the caller's request.
        # Always returns the provider response.
        def timed(operation)
          start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          response = operation.call
          latency = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000).round
          begin
            yield response, latency if block_given?
          rescue StandardError
            # Never let tracking errors surface to the caller
          end
          response
        end

        # Push one chat/completions usage event to Tokenr.
        def track_chat_response(model, response, latency_ms)
          usage = response.dig("usage")
          return unless usage

          Tokenr.track(
            model: model,
            provider: "openai",
            input_tokens: usage["prompt_tokens"] || 0,
            output_tokens: usage["completion_tokens"] || 0,
            latency_ms: latency_ms,
            agent_id: agent_id,
            feature_name: feature_name,
            tags: tags
          )
        end
      end
    end
  end
end
@@ -0,0 +1,96 @@
1
# frozen_string_literal: true

require "time" # Fix: Time#iso8601 lives in the `time` stdlib, never required before

module Tokenr
  # Buffers tracking events and delivers them to the API — synchronously
  # when config.async is false, otherwise via a background flusher thread
  # that drains the queue every flush_interval seconds.
  class Tracker
    attr_reader :client, :queue

    # @param client [Tokenr::Client] transport used for delivery.
    def initialize(client)
      @client = client
      @queue = []
      @mutex = Mutex.new
      @flusher = nil
      start_flusher if client.config.async
    end

    # Track one request. Queues it when async; sends immediately otherwise.
    def track(model:, input_tokens:, output_tokens:, **options)
      data = build_request(model, input_tokens, output_tokens, options)

      if client.config.async
        enqueue(data)
      else
        client.track(data)
      end
    end

    # Track many requests at once (same async/sync split as #track).
    def track_batch(requests)
      formatted = requests.map do |r|
        build_request(r[:model], r[:input_tokens], r[:output_tokens], r)
      end

      if client.config.async
        formatted.each { |r| enqueue(r) }
      else
        client.track_batch(formatted)
      end
    end

    # Drain the queue and deliver it in batch_size slices. Delivery errors
    # are logged to stderr and never raised.
    def flush
      to_send = nil
      @mutex.synchronize do
        return if @queue.empty?
        to_send = @queue.dup
        @queue.clear
      end

      to_send.each_slice(client.config.batch_size) do |batch|
        client.track_batch(batch)
      rescue StandardError => e
        warn "[Tokenr] Flush error: #{e.message}"
      end
    end

    # Stop the background thread and deliver anything still queued.
    def shutdown
      @flusher&.kill
      flush
    end

    private

    # Merge per-call options with configuration defaults into one API
    # payload; nil entries are stripped so the wire format stays compact.
    def build_request(model, input_tokens, output_tokens, options)
      config = client.config
      {
        model: model,
        input_tokens: input_tokens,
        output_tokens: output_tokens,
        agent_id: options[:agent_id] || config.agent_id,
        team_id: options[:team_id] || config.team_id,
        feature_name: options[:feature_name],
        provider: options[:provider],
        latency_ms: options[:latency_ms],
        status: options[:status] || "success",
        external_id: options[:external_id],
        total_cost: options[:total_cost],
        requested_at: options[:requested_at] || Time.now.iso8601,
        tags: config.default_tags.merge(options[:tags] || {}),
        metrics: options[:metrics]
      }.compact
    end

    # Fix: the size check previously read @queue.size OUTSIDE the mutex,
    # racing with concurrent producers. Capture the size under the same
    # lock as the push so the early-flush trigger is reliable.
    def enqueue(data)
      size = @mutex.synchronize do
        @queue << data
        @queue.size
      end
      flush if size >= client.config.batch_size
    end

    # Spawn the periodic flusher and make sure remaining events are sent
    # when the process exits.
    def start_flusher
      interval = client.config.flush_interval
      @flusher = Thread.new do
        loop do
          sleep interval
          flush
        end
      end
      @flusher.abort_on_exception = false
      at_exit { shutdown }
    end
  end
end
@@ -0,0 +1,5 @@
1
# frozen_string_literal: true

module Tokenr
  # Gem version string; also reported in the HTTP User-Agent header
  # sent by Tokenr::Client.
  VERSION = "0.1.0"
end
data/lib/tokenr.rb ADDED
@@ -0,0 +1,60 @@
1
# frozen_string_literal: true

require_relative "tokenr/version"
require_relative "tokenr/configuration"
require_relative "tokenr/client"
require_relative "tokenr/tracker"
require_relative "tokenr/integrations/openai"
require_relative "tokenr/integrations/anthropic"

module Tokenr
  # Base error for everything raised by this gem.
  class Error < StandardError; end
  # Raised when required configuration (the API key) is missing.
  class ConfigurationError < Error; end
  # Raised for non-2xx responses from the Tokenr API.
  class ApiError < Error; end

  class << self
    attr_accessor :configuration

    # Configure the SDK.
    #
    #   Tokenr.configure do |c|
    #     c.api_key = ENV["TOKENR_TOKEN"]
    #     c.agent_id = "my-app"
    #   end
    #
    def configure
      self.configuration ||= Configuration.new
      yield(configuration)
    end

    # Lazily-built API client.
    # Fix: fall back to a default Configuration (which reads TOKENR_TOKEN
    # from the environment) when Tokenr.configure was never called, instead
    # of memoizing a Client wrapped around nil.
    def client
      @client ||= begin
        self.configuration ||= Configuration.new
        Client.new(configuration)
      end
    end

    # Lazily-built tracker (owns the async queue / flusher thread).
    def tracker
      @tracker ||= Tracker.new(client)
    end

    # Track a single LLM request.
    def track(model:, input_tokens:, output_tokens:, **options)
      tracker.track(model: model, input_tokens: input_tokens, output_tokens: output_tokens, **options)
    end

    # Track multiple LLM requests in one API call.
    def track_batch(requests)
      tracker.track_batch(requests)
    end

    # Query cost data from the Tokenr API.
    def costs(**options)
      client.get_costs(**options)
    end

    # Reset all module-level state (useful in tests).
    # Fix: shut the tracker down first so its background flusher thread is
    # killed (and any queued events flushed) instead of leaking across resets.
    def reset!
      @tracker&.shutdown
      @client = nil
      @tracker = nil
      @configuration = nil
    end
  end
end
metadata ADDED
@@ -0,0 +1,56 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: tokenr-ruby
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - Tokenr
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 1980-01-02 00:00:00.000000000 Z
11
+ dependencies: []
12
+ description: Track and attribute LLM costs across OpenAI, Anthropic, and other providers.
13
+ Async by default, never slows down your app. Supports per-request attribution by
14
+ agent, feature, team, and custom tags.
15
+ email:
16
+ - support@tokenr.co
17
+ executables: []
18
+ extensions: []
19
+ extra_rdoc_files: []
20
+ files:
21
+ - CHANGELOG.md
22
+ - LICENSE.txt
23
+ - README.md
24
+ - lib/tokenr.rb
25
+ - lib/tokenr/client.rb
26
+ - lib/tokenr/configuration.rb
27
+ - lib/tokenr/integrations/anthropic.rb
28
+ - lib/tokenr/integrations/openai.rb
29
+ - lib/tokenr/tracker.rb
30
+ - lib/tokenr/version.rb
31
+ homepage: https://tokenr.co
32
+ licenses:
33
+ - MIT
34
+ metadata:
35
+ homepage_uri: https://tokenr.co
36
+ source_code_uri: https://github.com/tokenr-co/tokenr-ruby
37
+ changelog_uri: https://github.com/tokenr-co/tokenr-ruby/blob/main/CHANGELOG.md
38
+ rubygems_mfa_required: 'true'
39
+ rdoc_options: []
40
+ require_paths:
41
+ - lib
42
+ required_ruby_version: !ruby/object:Gem::Requirement
43
+ requirements:
44
+ - - ">="
45
+ - !ruby/object:Gem::Version
46
+ version: '3.0'
47
+ required_rubygems_version: !ruby/object:Gem::Requirement
48
+ requirements:
49
+ - - ">="
50
+ - !ruby/object:Gem::Version
51
+ version: '0'
52
+ requirements: []
53
+ rubygems_version: 3.6.9
54
+ specification_version: 4
55
+ summary: Automatic LLM cost tracking for OpenAI, Anthropic, and more
56
+ test_files: []