token-lens 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,56 @@
1
# frozen_string_literal: true

module TokenLens
  module Renderer
    # Assigns pixel geometry to a tree of node hashes so they can be drawn as
    # a flame graph: a token-scaled strip (:x/:y/:w) and a cost-scaled strip
    # (:cost_x/:cost_w). Nodes are plain hashes with :depth, :subtree_tokens,
    # :subtree_cost and :children keys.
    class Layout
      CANVAS_WIDTH = 1200
      ROW_HEIGHT = 32

      def initialize(canvas_width: CANVAS_WIDTH)
        @canvas_width = canvas_width
      end

      # Mutates each node hash in place, adding the layout keys, and returns
      # the same top-level array.
      def layout(nodes)
        deepest = all_nodes(nodes).map { |node| node[:depth] }.max || 0

        token_total = nodes.sum { |node| node[:subtree_tokens] }
        token_scale = token_total.positive? ? @canvas_width.fdiv(token_total) : 1.0
        position(nodes, x: 0, scale: token_scale, max_depth: deepest)

        cost_total = nodes.sum { |node| node[:subtree_cost] }
        cost_scale = cost_total.positive? ? @canvas_width.fdiv(cost_total) : 1.0
        position_cost(nodes, x: 0, scale: cost_scale, max_depth: deepest)

        nodes
      end

      private

      # Bottom-up layout: roots sit at the bottom (y = max_depth * ROW_HEIGHT)
      # and the deepest descendants at the top (y = 0). Siblings pack left to
      # right in proportion to their subtree token counts.
      def position(nodes, x:, scale:, max_depth:)
        nodes.reduce(x) do |cursor, node|
          node[:x] = cursor
          node[:y] = (max_depth - node[:depth]) * ROW_HEIGHT
          node[:w] = (node[:subtree_tokens] * scale).round
          position(node[:children], x: cursor, scale: scale, max_depth: max_depth)
          cursor + node[:w]
        end
      end

      # Same packing as #position but along the cost axis (:cost_x/:cost_w).
      def position_cost(nodes, x:, scale:, max_depth:)
        nodes.reduce(x) do |cursor, node|
          node[:cost_x] = cursor
          node[:cost_w] = (node[:subtree_cost] * scale).round
          position_cost(node[:children], x: cursor, scale: scale, max_depth: max_depth)
          cursor + node[:cost_w]
        end
      end

      # Depth-first flattening of the whole tree into one array.
      def all_nodes(nodes)
        nodes.flat_map { |node| [node, *all_nodes(node[:children])] }
      end
    end
  end
end
@@ -0,0 +1,88 @@
1
# frozen_string_literal: true

module TokenLens
  module Renderer
    # Restructures the raw parent/child event tree into the shape the renderer
    # draws: streaming duplicates are collapsed, human prompts become roots,
    # and linear assistant chains become flat sibling lists.
    class Reshaper
      def reshape(nodes)
        collapse_streaming(nodes).flat_map { |root| process_root(root) }
      end

      private

      # Collapse streaming chains: thinking -> text -> tool_use events emitted
      # by Claude Code for a single API response. An intermediate node is
      # dropped and replaced by its (sole) child.
      def collapse_streaming(nodes)
        nodes.flat_map do |node|
          next collapse_streaming(node[:children]) if streaming_intermediate?(node)

          node[:children] = collapse_streaming(node[:children])
          [node]
        end
      end

      # True when +node+ is an assistant event whose single assistant child
      # belongs to the same API call. Prefer request_id equality (same API
      # call); fall back to an input-token-count fingerprint when request ids
      # are absent.
      def streaming_intermediate?(node)
        parent_token = node[:token]
        return false unless parent_token.role == "assistant"
        return false unless node[:children].size == 1

        child_token = node[:children].first[:token]
        return false unless child_token.role == "assistant"

        if parent_token.request_id && child_token.request_id
          parent_token.request_id == child_token.request_id
        else
          parent_token.input_tokens == child_token.input_tokens &&
            parent_token.cache_read_tokens == child_token.cache_read_tokens &&
            parent_token.cache_creation_tokens == child_token.cache_creation_tokens
        end
      end

      # Re-root the tree around human prompt nodes. A human prompt becomes a
      # root whose children are the flattened assistant thread beneath it;
      # tool-result-only user roots are hoisted; orphan assistant roots are
      # flattened directly.
      def process_root(node)
        token = node[:token]
        if token.is_human_prompt?
          [node.merge(children: flatten_thread(node[:children], prev_input: 0))]
        elsif token.role == "user"
          # Tool-result-only user at root level -- hoist its children.
          node[:children].flat_map { |child| process_root(child) }
        else
          # Orphan assistant root (no human prompt ancestor).
          flatten_thread([node], prev_input: 0)
        end
      end

      # Flatten a linear user -> assistant -> user(tool_result) -> assistant
      # chain into a flat list of assistant siblings, computing each
      # assistant's marginal_input_tokens delta against the previous call.
      # Sidechain children stay nested under the assistant that spawned them.
      def flatten_thread(nodes, prev_input:)
        nodes.flat_map do |node|
          token = node[:token]
          if token.role == "user" && !token.is_human_prompt?
            flatten_thread(node[:children], prev_input: prev_input)
          elsif token.role == "assistant"
            marginal = [token.input_tokens - prev_input, 0].max
            # A large drop in input size is treated as a context compaction.
            compacted = prev_input > 0 && token.input_tokens < prev_input * 0.5
            sidechains, chain = node[:children].partition { |child| child[:token].is_sidechain }
            # Flatten the response chain inside task-notification sidechains
            # so they don't create arbitrarily deep linked-list nesting.
            sidechains = sidechains.map do |sidechain|
              next sidechain unless sidechain[:token].is_task_notification?

              sidechain.merge(children: flatten_thread(sidechain[:children], prev_input: 0))
            end
            flattened = node.merge(
              token: token.with(marginal_input_tokens: marginal, is_compaction: compacted),
              children: sidechains
            )
            [flattened] + flatten_thread(chain, prev_input: token.input_tokens)
          else
            process_root(node)
          end
        end
      end
    end
  end
end
@@ -0,0 +1,45 @@
1
# frozen_string_literal: true

require "json"
require "pathname"

module TokenLens
  # Locates and follows Claude Code session transcripts.
  #
  # Claude Code stores sessions at ~/.claude/projects/<encoded-cwd>/<session-id>.jsonl
  # where encoded-cwd is the absolute working directory with every non-alphanumeric
  # character replaced by a hyphen (e.g. /Users/me/proj => -Users-me-proj)
  module Session
    CLAUDE_DIR = Pathname.new(File.expand_path("~/.claude/projects"))

    # Encode a directory path the way Claude Code names project folders.
    def self.encoded_cwd(dir = Dir.pwd)
      dir.gsub(/[^a-zA-Z0-9]/, "-")
    end

    # Return the most recently modified *.jsonl session file for +dir+.
    # Raises RuntimeError when the project has no session files.
    def self.active_jsonl(dir = Dir.pwd)
      project_dir = CLAUDE_DIR / encoded_cwd(dir)

      jsonl_files = project_dir.glob("*.jsonl")
      raise "No session files found in #{project_dir}" if jsonl_files.empty?

      # Most recently modified file is assumed to be the active session.
      jsonl_files.max_by(&:mtime)
    end

    # Follow +path+ forever, yielding each newly appended line parsed as JSON.
    # Only lines written after the call are yielded. Polls every 100ms.
    def self.tail(path, &block)
      last_pos = File.size(path)
      loop do
        sleep 0.1
        current_size = File.size(path)
        # Fix: if the file shrank (rotated/truncated), the old offset would
        # seek past EOF and the loop would never yield again — restart from
        # the top of the new file instead.
        last_pos = 0 if current_size < last_pos
        # Nothing new appended yet.
        next if current_size == last_pos

        File.open(path) do |f|
          f.seek(last_pos)
          f.each_line { |line| block.call(JSON.parse(line)) }
          last_pos = f.pos
        end
      end
    end
  end
end
@@ -0,0 +1,35 @@
1
# frozen_string_literal: true

require "json"
require "token_lens/session"

module TokenLens
  module Sources
    # Event source that tails the active Claude Code session transcript and
    # pushes each parsed JSONL event onto the shared queue.
    class Jsonl
      # +queue+ is any object supporting #<< (e.g. Thread::Queue).
      def initialize(queue)
        @queue = queue
        @path = Session.active_jsonl
      end

      # Blocks forever, forwarding newly appended events to the queue.
      # Fix: the original duplicated Session.tail's entire poll/seek loop
      # line-for-line; delegate to the shared helper instead so the tailing
      # logic lives in exactly one place.
      def start
        warn " [jsonl] tailing #{@path.basename}"
        Session.tail(@path) do |event|
          @queue << {source: "jsonl", event: event}
        end
      end
    end
  end
end
@@ -0,0 +1,99 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "../pricing"
4
+
5
module TokenLens
  module Tokens
    # Immutable value object for one Claude Code JSONL transcript event,
    # carrying identity, lineage, content blocks, and token-usage counters.
    Jsonl = Data.define(
      :uuid,
      :parent_uuid,
      :request_id,
      :type,
      :role,
      :model,
      :is_sidechain,
      :agent_id,
      :content,
      :input_tokens,
      :output_tokens,
      :cache_read_tokens,
      :cache_creation_tokens,
      :marginal_input_tokens,
      :timestamp,
      :is_compaction
    ) do
      # Build a token from one parsed JSONL hash as written by Claude Code.
      # Missing usage counters default to 0; missing content to [].
      def self.from_raw(raw)
        message = raw["message"] || {}
        usage = message["usage"] || {}

        new(
          uuid: raw["uuid"],
          parent_uuid: raw["parentUuid"],
          request_id: raw["requestId"],
          type: raw["type"],
          role: message["role"],
          model: message["model"],
          is_sidechain: raw["isSidechain"] || false,
          agent_id: nil,
          content: Array(message["content"]),
          input_tokens: usage["input_tokens"].to_i,
          output_tokens: usage["output_tokens"].to_i,
          cache_read_tokens: usage["cache_read_input_tokens"].to_i,
          cache_creation_tokens: usage["cache_creation_input_tokens"].to_i,
          marginal_input_tokens: 0,
          timestamp: raw["timestamp"],
          is_compaction: false
        )
      end

      # Dollar cost of this event at the per-million-token rates returned by
      # Pricing.for_model for this event's model.
      def cost_usd
        rates = Pricing.for_model(model)
        weighted = marginal_input_tokens * rates[:input] +
          cache_read_tokens * rates[:cache_read] +
          cache_creation_tokens * rates[:cache_creation] +
          output_tokens * rates[:output]
        weighted / 1_000_000.0
      end

      def total_tokens
        [input_tokens, output_tokens, cache_read_tokens, cache_creation_tokens].sum
      end

      # Width used when drawing this event: only the tokens newly paid for by
      # this call (marginal input + cache writes + output).
      def display_width
        marginal_input_tokens + cache_creation_tokens + output_tokens
      end

      def assistant? = role == "assistant"

      # A user event typed by the human: user role, no tool results, and at
      # least one plain-text content block.
      def is_human_prompt?
        return false unless role == "user"
        return false unless tool_results.empty?

        content.any? { |block| block.is_a?(String) || (block.is_a?(Hash) && block["type"] == "text") }
      end

      # Human prompt injected by a background task rather than typed.
      def is_task_notification?
        is_human_prompt? && human_text.start_with?("<task-notification>")
      end

      # The stripped <summary> body of a task notification, or nil when absent.
      def task_notification_summary
        human_text[%r{<summary>(.*?)</summary>}m, 1]&.strip
      end

      # First textual content block as a plain string ("" when none).
      def human_text
        text = content.find { |block| block.is_a?(String) }
        return text if text

        hash_block = content.find { |block| block.is_a?(Hash) && block["type"] == "text" }
        hash_block&.[]("text") || ""
      end

      def tool_uses
        content.select { |block| block.is_a?(Hash) && block["type"] == "tool_use" }
      end

      def tool_results
        content.select { |block| block.is_a?(Hash) && block["type"] == "tool_result" }
      end
    end
  end
end
@@ -0,0 +1,5 @@
1
# frozen_string_literal: true

module TokenLens
  # Public gem version string.
  VERSION = "0.1.0"
end
data/lib/token_lens.rb ADDED
@@ -0,0 +1,9 @@
1
# frozen_string_literal: true

require_relative "token_lens/version"

module TokenLens
  # Human-readable identification banner for the gem.
  def self.hello = "token-lens v#{VERSION}"
end
metadata ADDED
@@ -0,0 +1,71 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: token-lens
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - rob durst
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 2026-03-23 00:00:00.000000000 Z
11
+ dependencies:
12
+ - !ruby/object:Gem::Dependency
13
+ name: thor
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - "~>"
17
+ - !ruby/object:Gem::Version
18
+ version: '1.3'
19
+ type: :runtime
20
+ prerelease: false
21
+ version_requirements: !ruby/object:Gem::Requirement
22
+ requirements:
23
+ - - "~>"
24
+ - !ruby/object:Gem::Version
25
+ version: '1.3'
26
+ email:
27
+ - me@robdurst.com
28
+ executables:
29
+ - token-lens
30
+ extensions: []
31
+ extra_rdoc_files: []
32
+ files:
33
+ - LICENSE
34
+ - README.md
35
+ - bin/token-lens
36
+ - lib/token_lens.rb
37
+ - lib/token_lens/cli.rb
38
+ - lib/token_lens/commands/record.rb
39
+ - lib/token_lens/commands/render.rb
40
+ - lib/token_lens/parser.rb
41
+ - lib/token_lens/pricing.rb
42
+ - lib/token_lens/renderer/annotator.rb
43
+ - lib/token_lens/renderer/html.rb
44
+ - lib/token_lens/renderer/layout.rb
45
+ - lib/token_lens/renderer/reshaper.rb
46
+ - lib/token_lens/session.rb
47
+ - lib/token_lens/sources/jsonl.rb
48
+ - lib/token_lens/tokens/jsonl.rb
49
+ - lib/token_lens/version.rb
50
+ homepage: https://github.com/BrickellResearch/token-lens
51
+ licenses:
52
+ - MIT
53
+ metadata: {}
54
+ rdoc_options: []
55
+ require_paths:
56
+ - lib
57
+ required_ruby_version: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - ">="
60
+ - !ruby/object:Gem::Version
61
+ version: '3.2'
62
+ required_rubygems_version: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - ">="
65
+ - !ruby/object:Gem::Version
66
+ version: '0'
67
+ requirements: []
68
+ rubygems_version: 3.6.2
69
+ specification_version: 4
70
+ summary: Flame graphs for Claude Code token usage
71
+ test_files: []