llm_cost_tracker 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +20 -0
- data/README.md +111 -68
- data/Rakefile +2 -0
- data/app/controllers/llm_cost_tracker/assets_controller.rb +1 -2
- data/app/helpers/llm_cost_tracker/dashboard_filter_helper.rb +6 -1
- data/app/services/llm_cost_tracker/dashboard/data_quality.rb +16 -1
- data/app/services/llm_cost_tracker/dashboard/filter.rb +22 -0
- data/app/views/llm_cost_tracker/calls/index.html.erb +10 -0
- data/app/views/llm_cost_tracker/dashboard/index.html.erb +10 -0
- data/app/views/llm_cost_tracker/data_quality/index.html.erb +46 -0
- data/lib/llm_cost_tracker/assets.rb +6 -11
- data/lib/llm_cost_tracker/configuration.rb +78 -42
- data/lib/llm_cost_tracker/event.rb +2 -0
- data/lib/llm_cost_tracker/generators/llm_cost_tracker/add_streaming_generator.rb +29 -0
- data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/add_streaming_to_llm_api_calls.rb.erb +25 -0
- data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/create_llm_api_calls.rb.erb +4 -0
- data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/llm_cost_tracker_prices.yml.erb +8 -1
- data/lib/llm_cost_tracker/llm_api_call.rb +8 -0
- data/lib/llm_cost_tracker/middleware/faraday.rb +57 -9
- data/lib/llm_cost_tracker/parsed_usage.rb +7 -3
- data/lib/llm_cost_tracker/parsers/anthropic.rb +79 -1
- data/lib/llm_cost_tracker/parsers/base.rb +17 -5
- data/lib/llm_cost_tracker/parsers/gemini.rb +59 -6
- data/lib/llm_cost_tracker/parsers/openai.rb +8 -0
- data/lib/llm_cost_tracker/parsers/openai_compatible.rb +8 -0
- data/lib/llm_cost_tracker/parsers/openai_usage.rb +55 -1
- data/lib/llm_cost_tracker/parsers/registry.rb +15 -3
- data/lib/llm_cost_tracker/parsers/sse.rb +81 -0
- data/lib/llm_cost_tracker/price_registry.rb +1 -1
- data/lib/llm_cost_tracker/price_sync/fetcher.rb +72 -0
- data/lib/llm_cost_tracker/price_sync/merger.rb +72 -0
- data/lib/llm_cost_tracker/price_sync/model_catalog.rb +77 -0
- data/lib/llm_cost_tracker/price_sync/raw_price.rb +35 -0
- data/lib/llm_cost_tracker/price_sync/source.rb +29 -0
- data/lib/llm_cost_tracker/price_sync/source_result.rb +7 -0
- data/lib/llm_cost_tracker/price_sync/sources/litellm.rb +91 -0
- data/lib/llm_cost_tracker/price_sync/sources/open_router.rb +94 -0
- data/lib/llm_cost_tracker/price_sync/validator.rb +66 -0
- data/lib/llm_cost_tracker/price_sync.rb +310 -0
- data/lib/llm_cost_tracker/storage/active_record_store.rb +3 -1
- data/lib/llm_cost_tracker/stream_collector.rb +158 -0
- data/lib/llm_cost_tracker/tags_column.rb +8 -0
- data/lib/llm_cost_tracker/tracker.rb +15 -12
- data/lib/llm_cost_tracker/value_helpers.rb +40 -0
- data/lib/llm_cost_tracker/version.rb +1 -1
- data/lib/llm_cost_tracker.rb +50 -29
- data/lib/tasks/llm_cost_tracker.rake +116 -0
- data/llm_cost_tracker.gemspec +8 -6
- metadata +24 -8

data/lib/llm_cost_tracker/configuration.rb
CHANGED

@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require_relative "errors"
+require_relative "value_helpers"
 
 module LlmCostTracker
   class Configuration
@@ -14,22 +15,32 @@ module LlmCostTracker
     STORAGE_ERROR_BEHAVIORS = %i[ignore warn raise].freeze
     STORAGE_BACKENDS = %i[log active_record custom].freeze
     UNKNOWN_PRICING_BEHAVIORS = %i[ignore warn raise].freeze
+    SHARED_SCALAR_ATTRIBUTES = %i[
+      enabled
+      custom_storage
+      on_budget_exceeded
+      monthly_budget
+      log_level
+      prices_file
+    ].freeze
+    SHARED_ENUM_ATTRIBUTES = {
+      storage_backend: [STORAGE_BACKENDS, :log],
+      budget_exceeded_behavior: [BUDGET_EXCEEDED_BEHAVIORS, :notify],
+      storage_error_behavior: [STORAGE_ERROR_BEHAVIORS, :warn],
+      unknown_pricing_behavior: [UNKNOWN_PRICING_BEHAVIORS, :warn]
+    }.freeze
 
-
-
-
-
-
-
-
-
-
-
-
-      :storage_backend, # :log, :active_record, :custom
-      :storage_error_behavior, # :ignore, :warn, :raise
-      :unknown_pricing_behavior, # :ignore, :warn, :raise
-      :openai_compatible_providers
+    attr_reader(
+      *SHARED_SCALAR_ATTRIBUTES,
+      :budget_exceeded_behavior,
+      :default_tags,
+      :pricing_overrides,
+      :report_tag_breakdowns,
+      :storage_backend,
+      :storage_error_behavior,
+      :unknown_pricing_behavior,
+      :openai_compatible_providers
+    )
 
     def initialize
       @enabled = true
@@ -46,55 +57,74 @@ module LlmCostTracker
       @pricing_overrides = {}
       @report_tag_breakdowns = []
       self.openai_compatible_providers = OPENAI_COMPATIBLE_PROVIDERS
+      @finalized = false
+    end
+
+    def default_tags=(value)
+      ensure_shared_configuration_mutable!
+      @default_tags = value
     end
 
     def openai_compatible_providers=(providers)
+      ensure_shared_configuration_mutable!
      @openai_compatible_providers = normalize_openai_compatible_providers(providers)
     end
 
-    def
-
+    def pricing_overrides=(value)
+      ensure_shared_configuration_mutable!
+      @pricing_overrides = value
     end
 
-    def
-
-
-        value,
-        BUDGET_EXCEEDED_BEHAVIORS,
-        default: :notify
-      )
+    def report_tag_breakdowns=(value)
+      ensure_shared_configuration_mutable!
+      @report_tag_breakdowns = value
     end
 
-
-
-
-        value
-
-        default: :warn
-      )
+    SHARED_SCALAR_ATTRIBUTES.each do |name|
+      define_method("#{name}=") do |value|
+        ensure_shared_configuration_mutable!
+        instance_variable_set(:"@#{name}", value)
+      end
     end
 
-
-
-
-        value,
-
-        default: :warn
-      )
+    SHARED_ENUM_ATTRIBUTES.each do |name, (allowed, default)|
+      define_method("#{name}=") do |value|
+        ensure_shared_configuration_mutable!
+        instance_variable_set(:"@#{name}", normalize_enum(name, value, allowed, default: default))
+      end
     end
 
     def normalize_openai_compatible_providers!
       self.openai_compatible_providers = openai_compatible_providers
     end
 
-    def
-
+    def finalize!
+      @default_tags = ValueHelpers.deep_freeze(@default_tags || {})
+      @pricing_overrides = ValueHelpers.deep_freeze(@pricing_overrides || {})
+      @report_tag_breakdowns = ValueHelpers.deep_freeze(Array(@report_tag_breakdowns))
+      @openai_compatible_providers = ValueHelpers.deep_freeze(@openai_compatible_providers || {})
+      @finalized = true
+      self
     end
 
-    def
-
+    def finalized? = @finalized
+
+    def dup_for_configuration
+      copy = dup
+      copy.instance_variable_set(:@default_tags, ValueHelpers.deep_dup(@default_tags || {}))
+      copy.instance_variable_set(:@pricing_overrides, ValueHelpers.deep_dup(@pricing_overrides || {}))
+      copy.instance_variable_set(:@report_tag_breakdowns, ValueHelpers.deep_dup(@report_tag_breakdowns || []))
+      copy.instance_variable_set(
+        :@openai_compatible_providers,
+        ValueHelpers.deep_dup(@openai_compatible_providers || {})
+      )
+      copy.instance_variable_set(:@finalized, false)
+      copy
     end
 
+    def active_record? = storage_backend == :active_record
+    def log? = storage_backend == :log
+
     private
 
     def normalize_enum(name, value, allowed, default:)
@@ -110,5 +140,11 @@ module LlmCostTracker
         normalized[host.to_s.downcase] = provider.to_s
       end
     end
+
+    def ensure_shared_configuration_mutable!
+      return unless finalized?
+
+      raise FrozenError, "can't modify frozen LlmCostTracker::Configuration"
+    end
   end
 end
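
The net effect is that the shared configuration can be locked after setup while per-request overrides still work. A rough usage sketch based only on the methods shown in this diff (not taken from the gem's README):

```ruby
config = LlmCostTracker::Configuration.new
config.monthly_budget = 50.0           # shared scalar setter, allowed before finalize!
config.default_tags = { app: "search" }

config.finalize!                       # deep-freezes tags, overrides and providers
config.finalized?                      # => true

begin
  config.log_level = :debug            # every shared setter now refuses to mutate
rescue FrozenError => e
  e.message                            # => "can't modify frozen LlmCostTracker::Configuration"
end

draft = config.dup_for_configuration   # mutable, deep-duped copy for later reconfiguration
draft.report_tag_breakdowns = %w[team feature]
```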

data/lib/llm_cost_tracker/generators/llm_cost_tracker/add_streaming_generator.rb
ADDED

@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+require "rails/generators"
+require "rails/generators/active_record"
+
+module LlmCostTracker
+  module Generators
+    class AddStreamingGenerator < Rails::Generators::Base
+      include ActiveRecord::Generators::Migration
+
+      source_root File.expand_path("templates", __dir__)
+
+      desc "Creates a migration to add llm_api_calls.stream and llm_api_calls.usage_source"
+
+      def create_migration_file
+        migration_template(
+          "add_streaming_to_llm_api_calls.rb.erb",
+          "db/migrate/add_streaming_to_llm_api_calls.rb"
+        )
+      end
+
+      private
+
+      def migration_version
+        "[#{ActiveRecord::VERSION::MAJOR}.#{ActiveRecord::VERSION::MINOR}]"
+      end
+    end
+  end
+end
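
Apps upgrading from 0.2.x presumably pick this up with something like `bin/rails generate llm_cost_tracker:add_streaming` followed by `bin/rails db:migrate` (the generator name is inferred from the class above; check the gem's README for the exact invocation). Fresh installs get both columns directly from the updated `create_llm_api_calls` template further down.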

data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/add_streaming_to_llm_api_calls.rb.erb
ADDED

@@ -0,0 +1,25 @@
+class AddStreamingToLlmApiCalls < ActiveRecord::Migration<%= migration_version %>
+  def up
+    unless column_exists?(:llm_api_calls, :stream)
+      add_column :llm_api_calls, :stream, :boolean, null: false, default: false
+      add_index :llm_api_calls, :stream
+    end
+
+    unless column_exists?(:llm_api_calls, :usage_source)
+      add_column :llm_api_calls, :usage_source, :string
+      add_index :llm_api_calls, :usage_source
+    end
+  end
+
+  def down
+    if column_exists?(:llm_api_calls, :usage_source)
+      remove_index :llm_api_calls, :usage_source if index_exists?(:llm_api_calls, :usage_source)
+      remove_column :llm_api_calls, :usage_source
+    end
+
+    if column_exists?(:llm_api_calls, :stream)
+      remove_index :llm_api_calls, :stream if index_exists?(:llm_api_calls, :stream)
+      remove_column :llm_api_calls, :stream
+    end
+  end
+end

data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/create_llm_api_calls.rb.erb
CHANGED

@@ -10,6 +10,8 @@ class CreateLlmApiCalls < ActiveRecord::Migration<%= migration_version %>
       t.decimal :output_cost, precision: 20, scale: 8
       t.decimal :total_cost, precision: 20, scale: 8
       t.integer :latency_ms
+      t.boolean :stream, null: false, default: false
+      t.string :usage_source
       if postgresql?
         t.jsonb :tags, null: false, default: {}
       else
@@ -24,6 +26,8 @@ class CreateLlmApiCalls < ActiveRecord::Migration<%= migration_version %>
     add_index :llm_api_calls, :model
     add_index :llm_api_calls, :tracked_at
     add_index :llm_api_calls, [:provider, :tracked_at]
+    add_index :llm_api_calls, :stream
+    add_index :llm_api_calls, :usage_source
     add_index :llm_api_calls, :tags, using: :gin if postgresql?
   end
 

data/lib/llm_cost_tracker/generators/llm_cost_tracker/templates/llm_cost_tracker_prices.yml.erb
CHANGED

@@ -14,8 +14,11 @@
 #
 # Optional metadata keys, ignored by cost calculation:
 # - _source
+# - _source_version
+# - _fetched_at
 # - _updated
 # - _notes
+# - _validator_override
 #
 # Example: custom fine-tune
 # models:
@@ -30,7 +33,11 @@
 #   "gpt-4o":
 #     input: 2.00
 #     output: 8.00
-#     _source: "
+#     _source: "manual"
 #     _updated: "2026-04-18"
+#
+# Use _source: "manual" for custom or orphaned entries you never want sync to touch.
+# Use _validator_override: ["skip_relative_change"] if a negotiated price would
+# otherwise trip the >3x sync warning.
 
 models:

data/lib/llm_cost_tracker/llm_api_call.rb
CHANGED

@@ -21,6 +21,14 @@ module LlmCostTracker
     scope :without_cost, -> { where(total_cost: nil) }
     scope :unknown_pricing, -> { without_cost }
     scope :with_latency, -> { latency_column? ? where.not(latency_ms: nil) : none }
+    scope :streaming, -> { stream_column? ? where(stream: true) : none }
+    scope :non_streaming, -> { stream_column? ? where(stream: [false, nil]) : all }
+    scope :by_usage_source, ->(source) { usage_source_column? ? where(usage_source: source.to_s) : none }
+    scope :streaming_missing_usage, lambda {
+      return none unless stream_column? && usage_source_column?
+
+      where(stream: true).where(usage_source: ["unknown", nil])
+    }
 
     scope :with_json_tags, lambda {
       if tags_json_column?

data/lib/llm_cost_tracker/middleware/faraday.rb
CHANGED

@@ -18,22 +18,39 @@ module LlmCostTracker
 
         request_url = request_env.url.to_s
         request_body = read_body(request_env.body) || ""
+        parser = Parsers::Registry.find_for(request_url)
+        streaming = parser&.streaming_request?(request_url, request_body)
+        stream_buffer = install_stream_tap(request_env) if streaming
 
-
+        Tracker.enforce_budget! if parser
         started_at = monotonic_time
 
         @app.call(request_env).on_complete do |response_env|
-          process(
+          process(
+            parser: parser,
+            request_env: request_env,
+            request_url: request_url,
+            request_body: request_body,
+            response_env: response_env,
+            latency_ms: elapsed_ms(started_at),
+            streaming: streaming,
+            stream_buffer: stream_buffer
+          )
         end
       end
 
       private
 
-      def process(request_env
-
+      def process(parser:, request_env:, request_url:, request_body:, response_env:,
+                  latency_ms:, streaming:, stream_buffer:)
        return unless parser
 
-        parsed =
+        parsed =
+          if streaming
+            parse_stream(parser, request_url, request_body, response_env, stream_buffer)
+          else
+            parse_response(parser, request_url, request_body, response_env)
+          end
        return unless parsed
 
        Tracker.record(
@@ -42,6 +59,8 @@ module LlmCostTracker
          input_tokens: parsed.input_tokens,
          output_tokens: parsed.output_tokens,
          latency_ms: latency_ms,
+          stream: parsed.stream,
+          usage_source: parsed.usage_source,
          metadata: resolved_tags(request_env).merge(parsed.metadata)
        )
      rescue LlmCostTracker::Error
@@ -54,7 +73,9 @@ module LlmCostTracker
        response_body = read_body(response_env.body)
        unless response_body
          Logging.warn(
-            "Unable to read response body for #{request_url};
+            "Unable to read response body for #{request_url}; " \
+            "streaming responses are captured automatically for OpenAI/Anthropic/Gemini " \
+            "or via LlmCostTracker.track_stream for custom clients."
          )
          return nil
        end
@@ -62,10 +83,37 @@ module LlmCostTracker
        parser.parse(request_url, request_body, response_env.status, response_body)
      end
 
-      def
-
+      def parse_stream(parser, request_url, request_body, response_env, stream_buffer)
+        body = stream_buffer&.string
+        body = read_body(response_env.body) if body.nil? || body.empty?
+
+        if body.nil? || body.empty?
+          Logging.warn(
+            "Unable to capture streaming response for #{request_url}; " \
+            "fall back to LlmCostTracker.track_stream for manual capture."
+          )
+          return nil
+        end
+
+        events = Parsers::SSE.parse(body)
+        parser.parse_stream(request_url, request_body, response_env.status, events)
+      end
+
+      def install_stream_tap(request_env)
+        return nil unless request_env.respond_to?(:request) && request_env.request
 
-
+        original = request_env.request.on_data
+        return nil unless original
+
+        buffer = StringIO.new
+        request_env.request.on_data = proc do |chunk, size, env|
+          buffer << chunk.to_s
+          original.call(chunk, size, env)
+        end
+        buffer
+      rescue StandardError => e
+        Logging.warn("Unable to install streaming tap: #{e.class}: #{e.message}")
+        nil
      end
 
      def read_body(body)
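
A sketch of how a streaming call can be made so the tap above has something to capture: the middleware wraps whatever `on_data` callback the caller registers, copies every chunk into its own buffer, and parses usage from the buffered SSE once the request completes. The middleware class name and registration style below are assumptions based on the file path, not taken from the gem's docs:

```ruby
require "faraday"
require "json"

conn = Faraday.new(url: "https://api.openai.com") do |f|
  f.use LlmCostTracker::Middleware::Faraday   # assumed class name, from the file path above
  f.adapter Faraday.default_adapter
end

conn.post("/v1/chat/completions") do |req|
  req.headers["Authorization"] = "Bearer #{ENV["OPENAI_API_KEY"]}"
  req.headers["Content-Type"]  = "application/json"
  req.body = JSON.generate(
    model: "gpt-4o-mini",
    stream: true,
    stream_options: { include_usage: true },  # asks OpenAI to emit usage in the final chunk
    messages: [{ role: "user", content: "hello" }]
  )
  # The caller's own streaming handler; install_stream_tap wraps this proc and
  # forwards every chunk to it unchanged after buffering.
  req.options.on_data = proc do |chunk, _received_bytes, _env|
    print chunk
  end
end
```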

data/lib/llm_cost_tracker/parsed_usage.rb
CHANGED

@@ -10,11 +10,13 @@ module LlmCostTracker
     :cached_input_tokens,
     :cache_read_input_tokens,
     :cache_creation_input_tokens,
-    :reasoning_tokens
+    :reasoning_tokens,
+    :stream,
+    :usage_source
   )
 
   class ParsedUsage
-    TRACKING_KEYS = %i[provider model input_tokens output_tokens total_tokens].freeze
+    TRACKING_KEYS = %i[provider model input_tokens output_tokens total_tokens stream usage_source].freeze
 
     def self.build(**attributes)
       new(
@@ -26,7 +28,9 @@ module LlmCostTracker
         cached_input_tokens: attributes[:cached_input_tokens],
         cache_read_input_tokens: attributes[:cache_read_input_tokens],
         cache_creation_input_tokens: attributes[:cache_creation_input_tokens],
-        reasoning_tokens: attributes[:reasoning_tokens]
+        reasoning_tokens: attributes[:reasoning_tokens],
+        stream: attributes[:stream] || false,
+        usage_source: attributes[:usage_source]
       )
     end
 
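
The two new fields default conservatively when a parser does not set them; a quick illustration using only what the build method above shows:

```ruby
usage = LlmCostTracker::ParsedUsage.build(
  provider: "openai",
  model: "gpt-4o-mini",
  input_tokens: 12,
  output_tokens: 80,
  total_tokens: 92
)

usage.stream        # => false  (attributes[:stream] || false)
usage.usage_source  # => nil    (parsers set :response, :stream_final or :unknown)
```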

data/lib/llm_cost_tracker/parsers/anthropic.rb
CHANGED

@@ -16,6 +16,10 @@ module LlmCostTracker
         false
       end
 
+      def provider_names
+        %w[anthropic]
+      end
+
       def parse(_request_url, request_body, response_status, response_body)
         return nil unless response_status == 200
 
@@ -33,9 +37,83 @@ module LlmCostTracker
           total_tokens: usage["input_tokens"].to_i + usage["output_tokens"].to_i +
                         usage["cache_read_input_tokens"].to_i + usage["cache_creation_input_tokens"].to_i,
           cache_read_input_tokens: usage["cache_read_input_tokens"],
-          cache_creation_input_tokens: usage["cache_creation_input_tokens"]
+          cache_creation_input_tokens: usage["cache_creation_input_tokens"],
+          usage_source: :response
         )
       end
+
+      def parse_stream(_request_url, request_body, response_status, events)
+        return nil unless response_status == 200
+
+        request = safe_json_parse(request_body)
+        model = stream_model(events) || request["model"]
+        usage = stream_usage(events)
+
+        if usage
+          input = usage["input_tokens"].to_i
+          output = usage["output_tokens"].to_i
+          cache_read = usage["cache_read_input_tokens"].to_i
+          cache_creation = usage["cache_creation_input_tokens"].to_i
+
+          ParsedUsage.build(
+            provider: "anthropic",
+            model: model,
+            input_tokens: input,
+            output_tokens: output,
+            total_tokens: input + output + cache_read + cache_creation,
+            cache_read_input_tokens: usage["cache_read_input_tokens"],
+            cache_creation_input_tokens: usage["cache_creation_input_tokens"],
+            stream: true,
+            usage_source: :stream_final
+          )
+        else
+          ParsedUsage.build(
+            provider: "anthropic",
+            model: model,
+            input_tokens: 0,
+            output_tokens: 0,
+            total_tokens: 0,
+            stream: true,
+            usage_source: :unknown
+          )
+        end
+      end
+
+      private
+
+      def stream_usage(events)
+        start_usage = nil
+        latest_delta = nil
+
+        events.each do |event|
+          data = event[:data]
+          next unless data.is_a?(Hash)
+
+          case data["type"]
+          when "message_start"
+            start_usage = data.dig("message", "usage")
+          when "message_delta"
+            latest_delta = data["usage"] if data["usage"].is_a?(Hash)
+          end
+        end
+
+        return nil unless start_usage || latest_delta
+
+        (start_usage || {}).merge(latest_delta || {}) do |_key, start_val, delta_val|
+          delta_val.nil? ? start_val : delta_val
+        end
+      end
+
+      def stream_model(events)
+        events.each do |event|
+          data = event[:data]
+          next unless data.is_a?(Hash)
+
+          model = data.dig("message", "model")
+          return model if model && !model.empty?
+        end
+        nil
+      end
     end
   end
 end
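
How the merge works: Anthropic's streaming format reports input counts on `message_start` and the final output count on the last `message_delta`. Illustrative event hashes in the `{ data: ... }` shape the SSE parser is expected to yield:

```ruby
events = [
  { data: { "type" => "message_start",
            "message" => { "model" => "claude-3-5-sonnet",  # illustrative model id
                           "usage" => { "input_tokens" => 120, "output_tokens" => 1 } } } },
  { data: { "type" => "content_block_delta" } },             # carries no usage, ignored
  { data: { "type" => "message_delta", "usage" => { "output_tokens" => 342 } } }
]

# stream_usage(events) merges the message_start usage with the latest
# message_delta usage, preferring non-nil delta values:
#   => { "input_tokens" => 120, "output_tokens" => 342 }
```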

data/lib/llm_cost_tracker/parsers/base.rb
CHANGED

@@ -5,19 +5,31 @@ require "json"
 module LlmCostTracker
   module Parsers
     class Base
-      # Parse a provider response into a {LlmCostTracker::ParsedUsage}, or return
-      # nil when the response is not trackable (non-200, missing usage, etc).
-      #
-      # @return [LlmCostTracker::ParsedUsage, nil]
       def parse(request_url, request_body, response_status, response_body)
         raise NotImplementedError
       end
 
-
+      def provider_names
+        []
+      end
+
       def match?(url)
         raise NotImplementedError
       end
 
+      def streaming_request?(_request_url, request_body)
+        return false if request_body.nil?
+
+        body = request_body.to_s
+        return false if body.empty?
+
+        body.include?('"stream":true') || body.include?('"stream": true') || body.include?("stream: true")
+      end
+
+      def parse_stream(_request_url, _request_body, _response_status, _events)
+        nil
+      end
+
       private
 
       def safe_json_parse(body)

data/lib/llm_cost_tracker/parsers/gemini.rb
CHANGED

@@ -9,6 +9,7 @@ module LlmCostTracker
     class Gemini < Base
       HOSTS = %w[generativelanguage.googleapis.com].freeze
       TRACKED_PATH_PATTERN = %r{/models/[^/:]+:(?:generateContent|streamGenerateContent)\z}
+      STREAM_PATH_PATTERN = /:streamGenerateContent\z/
 
       def match?(url)
         uri = URI.parse(url.to_s)
@@ -17,6 +18,16 @@ module LlmCostTracker
         false
       end
 
+      def provider_names
+        %w[gemini]
+      end
+
+      def streaming_request?(request_url, request_body)
+        return true if streaming_url?(request_url)
+
+        super
+      end
+
       def parse(request_url, _request_body, response_status, response_body)
         return nil unless response_status == 200
 
@@ -24,31 +35,73 @@ module LlmCostTracker
         usage = response["usageMetadata"]
         return nil unless usage
 
-
+        build_parsed_usage(request_url, usage, usage_source: :response)
+      end
+
+      def parse_stream(request_url, _request_body, response_status, events)
+        return nil unless response_status == 200
+
+        usage = merged_stream_usage(events)
         model = extract_model_from_url(request_url)
 
+        if usage
+          build_parsed_usage(request_url, usage, stream: true, usage_source: :stream_final)
+        else
+          ParsedUsage.build(
+            provider: "gemini",
+            model: model,
+            input_tokens: 0,
+            output_tokens: 0,
+            total_tokens: 0,
+            stream: true,
+            usage_source: :unknown
+          )
+        end
+      end
+
+      private
+
+      def build_parsed_usage(request_url, usage, usage_source:, stream: false)
         ParsedUsage.build(
           provider: "gemini",
-          model:
+          model: extract_model_from_url(request_url),
           input_tokens: usage["promptTokenCount"].to_i,
           output_tokens: output_tokens(usage),
           total_tokens: usage["totalTokenCount"].to_i,
-          cached_input_tokens: usage["cachedContentTokenCount"]
+          cached_input_tokens: usage["cachedContentTokenCount"],
+          stream: stream,
+          usage_source: usage_source
         )
       end
 
-
+      def merged_stream_usage(events)
+        latest = nil
+        events.each do |event|
+          data = event[:data]
+          next unless data.is_a?(Hash)
+
+          meta = data["usageMetadata"]
+          latest = meta if meta.is_a?(Hash)
+        end
+        latest
+      end
 
       def output_tokens(usage)
         usage["candidatesTokenCount"].to_i + usage["thoughtsTokenCount"].to_i
       end
 
+      def streaming_url?(request_url)
+        URI.parse(request_url.to_s).path.match?(STREAM_PATH_PATTERN)
+      rescue URI::InvalidURIError
+        false
+      end
+
       def extract_model_from_url(url)
         uri = URI.parse(url.to_s)
         match = uri.path.match(%r{/models/([^/:]+)})
-        match
+        match && match[1]
       rescue URI::InvalidURIError
-
+        nil
       end
     end
   end
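
For Gemini the streaming signal is usually the URL itself rather than the request body; a rough check of both branches (class name as shown in the diff above, model id illustrative):

```ruby
parser = LlmCostTracker::Parsers::Gemini.new

streaming = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent"
blocking  = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"

parser.streaming_request?(streaming, "{}")  # => true, path matches STREAM_PATH_PATTERN
parser.streaming_request?(blocking, "{}")   # => false, falls through to the inherited body check
```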

data/lib/llm_cost_tracker/parsers/openai.rb
CHANGED

@@ -20,10 +20,18 @@
         false
       end
 
+      def provider_names
+        %w[openai]
+      end
+
       def parse(request_url, request_body, response_status, response_body)
         parse_openai_usage(request_url, request_body, response_status, response_body)
       end
 
+      def parse_stream(request_url, request_body, response_status, events)
+        parse_openai_stream_usage(request_url, request_body, response_status, events)
+      end
+
       private
 
       def provider_for(_request_url)