agentfit 0.1.3 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/.github/workflows/release.yml +4 -0
  2. package/README.md +0 -2
  3. package/app/(dashboard)/ai-insights/page.tsx +271 -0
  4. package/app/(dashboard)/models/page.tsx +21 -0
  5. package/app/(dashboard)/page.tsx +2 -0
  6. package/app/(dashboard)/sessions/[id]/page.tsx +16 -2
  7. package/app/(dashboard)/settings/page.tsx +168 -0
  8. package/app/api/analyze/aggregate/route.ts +88 -0
  9. package/app/api/analyze/estimate/route.ts +62 -0
  10. package/app/api/analyze/route.ts +142 -0
  11. package/app/api/cc-versions/route.ts +84 -0
  12. package/app/api/config/route.ts +35 -0
  13. package/bin/agentfit.mjs +18 -8
  14. package/components/analyze-confirm-dialog.tsx +81 -0
  15. package/components/app-sidebar.tsx +14 -0
  16. package/components/data-provider.tsx +4 -2
  17. package/components/model-usage-chart.tsx +216 -0
  18. package/components/overview-cards.tsx +1 -1
  19. package/components/session-ai-analysis.tsx +318 -0
  20. package/components/sessions-table.tsx +169 -15
  21. package/components/version-lag-chart.tsx +284 -0
  22. package/electron/main.mjs +61 -34
  23. package/generated/prisma/browser.ts +5 -0
  24. package/generated/prisma/client.ts +5 -0
  25. package/generated/prisma/internal/class.ts +14 -4
  26. package/generated/prisma/internal/prismaNamespace.ts +95 -2
  27. package/generated/prisma/internal/prismaNamespaceBrowser.ts +19 -1
  28. package/generated/prisma/models/Session.ts +57 -1
  29. package/generated/prisma/models/SessionAnalysis.ts +1321 -0
  30. package/generated/prisma/models.ts +1 -0
  31. package/lib/config.ts +45 -0
  32. package/lib/openai.ts +253 -0
  33. package/lib/parse-codex.ts +2 -0
  34. package/lib/parse-logs.ts +21 -7
  35. package/lib/queries.ts +5 -1
  36. package/lib/sync.ts +17 -5
  37. package/package.json +2 -1
  38. package/prisma/migrations/20260404151230_add_session_analysis/migration.sql +18 -0
  39. package/prisma/migrations/20260405230736_add_cli_version/migration.sql +41 -0
  40. package/prisma/migrations/20260406205546_add_model_counts/migration.sql +42 -0
  41. package/prisma/schema.prisma +16 -0
  42. package/prisma/schema.sql +21 -0
@@ -12,4 +12,5 @@ export type * from './models/Session'
12
12
  export type * from './models/Image'
13
13
  export type * from './models/SyncLog'
14
14
  export type * from './models/Report'
15
+ export type * from './models/SessionAnalysis'
15
16
  export type * from './commonInputTypes'
package/lib/config.ts ADDED
@@ -0,0 +1,45 @@
1
+ // ─── User Configuration ─────────────────────────────────────────────
2
+ // Stored in ~/.agentfit/config.json — separate from DB and backups.
3
+
4
+ import fs from 'fs'
5
+ import path from 'path'
6
+ import os from 'os'
7
+
8
+ const CONFIG_DIR = path.join(os.homedir(), '.agentfit')
9
+ const CONFIG_PATH = path.join(CONFIG_DIR, 'config.json')
10
+
11
+ interface Config {
12
+ openaiApiKey?: string
13
+ }
14
+
15
+ export function readConfig(): Config {
16
+ try {
17
+ if (!fs.existsSync(CONFIG_PATH)) return {}
18
+ return JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf-8'))
19
+ } catch {
20
+ return {}
21
+ }
22
+ }
23
+
24
+ export function writeConfig(updates: Partial<Config>) {
25
+ const current = readConfig()
26
+ const merged = { ...current, ...updates }
27
+
28
+ if (!fs.existsSync(CONFIG_DIR)) {
29
+ fs.mkdirSync(CONFIG_DIR, { recursive: true })
30
+ }
31
+
32
+ fs.writeFileSync(CONFIG_PATH, JSON.stringify(merged, null, 2) + '\n', { mode: 0o600 })
33
+ }
34
+
35
+ export function getOpenAIKey(): string | undefined {
36
+ return readConfig().openaiApiKey
37
+ }
38
+
39
+ export function setOpenAIKey(key: string) {
40
+ writeConfig({ openaiApiKey: key })
41
+ }
42
+
43
/**
 * Remove the stored OpenAI API key.
 * Works because writeConfig serializes with JSON.stringify, which drops
 * properties whose value is undefined — the key disappears from the file.
 */
export function clearOpenAIKey() {
  writeConfig({ openaiApiKey: undefined })
}
package/lib/openai.ts ADDED
@@ -0,0 +1,253 @@
1
+ // ─── OpenAI Message Classification Engine ──────────────────────────
2
+ // Classifies user messages from Claude Code sessions by type, role,
3
+ // skill level, and sentiment using OpenAI's API.
4
+ // Ported from bizpub-cc/scripts/prepare_batch.py
5
+
6
+ import OpenAI from 'openai'
7
+ import type { ChatMessage } from './session-detail'
8
+
9
+ // ─── Types ──────────────────────────────────────────────────────────
10
+
11
/** One classified user message (dimensions defined in SYSTEM_PROMPT). */
export interface MessageClassification {
  messageIndex: number // stepIndex of the message within the session chat log
  messagePreview: string // first 80 chars of the message text
  messageType: string // instruction | question | feedback | context | navigation | meta, or 'unknown'
  role: string // implied professional role (product_ux, architect, ...), or 'unknown'
  skillLevel: string // non_technical | junior | mid | senior | expert, or 'unknown'
  sentiment: string // neutral | positive | frustrated | exploratory, or 'unknown'
}

/** Aggregate result of classifying a session's user messages. */
export interface ClassificationResult {
  classifications: MessageClassification[] // one entry per classified message
  model: string // OpenAI model used (see MODEL constant)
  totalMessages: number // number of user messages submitted
  inputTokens: number // total prompt tokens across all batches
  outputTokens: number // total completion tokens across all batches
  costUSD: number // actual API cost from reported usage
}

/** Pre-flight cost estimate computed without any API call. */
export interface CostEstimate {
  messageCount: number // messages that would be classified
  estimatedInputTokens: number // heuristic: ~4 chars per token
  estimatedOutputTokens: number // heuristic: ~50 tokens per message
  estimatedCostUSD: number // derived from the pricing constants below
}
35
+
36
+ // ─── Constants ──────────────────────────────────────────────────────
37
+
38
// Classifier model — cheap and fast; pricing constants below must match it.
const MODEL = 'gpt-4.1-mini'

// gpt-4.1-mini pricing (per 1M tokens)
// NOTE(review): hard-coded — keep in sync with OpenAI's published prices.
const INPUT_PRICE_PER_M = 0.40
const OUTPUT_PRICE_PER_M = 1.60

// System message sent with every classification batch. The model is told
// to answer with a bare JSON array, one object per numbered input message
// (classifyMessages also tolerates an object-wrapped array in the reply).
const SYSTEM_PROMPT = `You are a message classifier for software development conversations. Given user messages from an AI coding agent session (e.g., Claude Code, Codex), classify each on these dimensions:

1. **message_type**: One of:
- instruction: Direct command to do something ("add a button", "fix the bug", "deploy")
- question: Asking for information or explanation ("how does X work?", "what's wrong?")
- feedback: Reacting to agent output ("looks good", "no that's wrong", "yes")
- context: Providing background info, pasting errors, sharing URLs/specs
- navigation: File/code navigation ("show me the file", "read X", "search for Y")
- meta: About the conversation itself ("let's move on", "forget that", "start over")

2. **role**: What professional role this message implies:
- product_ux: UI/UX decisions, user flows, design choices, layout
- architect: System design, tech stack, data modeling, API design
- frontend_dev: UI components, styling, client-side code
- backend_dev: API routes, server logic, database queries, auth
- data_engineer: Data pipelines, parsing, ETL, batch processing
- domain_expert: Domain-specific knowledge, business rules
- qa_tester: Testing, validation, edge cases, bug reports
- devops: Deployment, CI/CD, environment config, hosting
- project_manager: Planning, priorities, scope, task management

3. **skill_level**: Technical skill needed to formulate this message:
- non_technical: Anyone could say this, no programming knowledge needed
- junior: Basic programming concepts, simple instructions
- mid: Working knowledge of frameworks and tools
- senior: Deep implementation details, debugging, architecture trade-offs
- expert: Cutting-edge technical or deep domain expertise

4. **sentiment**: Emotional tone:
- neutral: Matter-of-fact
- positive: Satisfied, excited, appreciative
- frustrated: Annoyed, impatient, something isn't working
- exploratory: Curious, brainstorming, trying things out

You will receive a numbered list of user messages, each possibly with assistant context.
Respond with ONLY a JSON array of objects in the same order, no explanation:
[{"message_type": "...", "role": "...", "skill_level": "...", "sentiment": "..."}, ...]`
81
+
82
+ // ─── Helpers ────────────────────────────────────────────────────────
83
+
84
+ function truncateText(text: string, maxChars: number): string {
85
+ if (text.length <= maxChars) return text
86
+ const half = Math.floor(maxChars / 2)
87
+ return text.slice(0, half) + ' ... ' + text.slice(-half)
88
+ }
89
+
90
/** A user message prepared for classification, with surrounding context. */
interface UserMessageWithContext {
  index: number // stepIndex of the message in the original chat log
  text: string // message text, clipped to 1500 chars by extractUserMessages
  assistantContext: string // preceding plain assistant reply (truncated), '' if none
  preview: string // first 80 chars of the raw text, for display
}
96
+
97
+ /**
98
+ * Extract user messages from a chat log with preceding assistant context.
99
+ */
100
+ export function extractUserMessages(chatLog: ChatMessage[]): UserMessageWithContext[] {
101
+ const messages: UserMessageWithContext[] = []
102
+ let lastAssistantText = ''
103
+
104
+ for (const msg of chatLog) {
105
+ if (msg.role === 'assistant' && !msg.isThinking && !msg.toolName) {
106
+ lastAssistantText = msg.content
107
+ } else if (msg.role === 'user') {
108
+ const text = msg.content.trim()
109
+ if (!text || text.length < 3) continue
110
+
111
+ messages.push({
112
+ index: msg.stepIndex,
113
+ text: text.length > 1500 ? text.slice(0, 1500) + '... [truncated]' : text,
114
+ assistantContext: lastAssistantText
115
+ ? truncateText(lastAssistantText, 2000)
116
+ : '',
117
+ preview: text.slice(0, 80),
118
+ })
119
+ }
120
+ }
121
+
122
+ return messages
123
+ }
124
+
125
+ /**
126
+ * Estimate the cost of classifying messages without making an API call.
127
+ */
128
+ export function estimateCost(messages: UserMessageWithContext[]): CostEstimate {
129
+ const systemChars = SYSTEM_PROMPT.length
130
+ let userChars = 0
131
+
132
+ for (const msg of messages) {
133
+ userChars += msg.text.length
134
+ if (msg.assistantContext) {
135
+ userChars += msg.assistantContext.length + 30 // "[Previous assistant message: ]\n\n"
136
+ }
137
+ userChars += 20 // numbering overhead
138
+ }
139
+
140
+ // Rough token estimate: ~4 chars per token
141
+ const inputTokens = Math.ceil((systemChars + userChars) / 4)
142
+ // ~50 output tokens per message (JSON classification)
143
+ const outputTokens = messages.length * 50
144
+
145
+ const costUSD =
146
+ (inputTokens / 1_000_000) * INPUT_PRICE_PER_M +
147
+ (outputTokens / 1_000_000) * OUTPUT_PRICE_PER_M
148
+
149
+ return {
150
+ messageCount: messages.length,
151
+ estimatedInputTokens: inputTokens,
152
+ estimatedOutputTokens: outputTokens,
153
+ estimatedCostUSD: costUSD,
154
+ }
155
+ }
156
+
157
+ /**
158
+ * Build the user prompt content for a batch of messages.
159
+ */
160
+ function buildBatchPrompt(messages: UserMessageWithContext[]): string {
161
+ return messages
162
+ .map((msg, i) => {
163
+ const parts: string[] = [`${i + 1}.`]
164
+ if (msg.assistantContext) {
165
+ parts.push(`[Previous assistant message: ${msg.assistantContext}]`)
166
+ }
167
+ parts.push(`User message: ${msg.text}`)
168
+ return parts.join('\n')
169
+ })
170
+ .join('\n\n')
171
+ }
172
+
173
+ // Max messages per API call to stay within context limits
174
+ const BATCH_SIZE = 20
175
+
176
+ /**
177
+ * Classify user messages by calling the OpenAI API.
178
+ * Batches messages to reduce API calls.
179
+ */
180
+ export async function classifyMessages(
181
+ apiKey: string,
182
+ messages: UserMessageWithContext[]
183
+ ): Promise<ClassificationResult> {
184
+ const client = new OpenAI({ apiKey })
185
+
186
+ const classifications: MessageClassification[] = []
187
+ let totalInputTokens = 0
188
+ let totalOutputTokens = 0
189
+
190
+ // Process in batches
191
+ for (let i = 0; i < messages.length; i += BATCH_SIZE) {
192
+ const batch = messages.slice(i, i + BATCH_SIZE)
193
+ const userContent = buildBatchPrompt(batch)
194
+
195
+ const response = await client.chat.completions.create({
196
+ model: MODEL,
197
+ messages: [
198
+ { role: 'system', content: SYSTEM_PROMPT },
199
+ { role: 'user', content: userContent },
200
+ ],
201
+ temperature: 0.0,
202
+ response_format: { type: 'json_object' },
203
+ })
204
+
205
+ const choice = response.choices[0]
206
+ const content = choice?.message?.content || '[]'
207
+
208
+ totalInputTokens += response.usage?.prompt_tokens || 0
209
+ totalOutputTokens += response.usage?.completion_tokens || 0
210
+
211
+ // Parse response — could be a JSON array or an object with an array
212
+ let parsed: Array<{
213
+ message_type?: string
214
+ role?: string
215
+ skill_level?: string
216
+ sentiment?: string
217
+ }>
218
+ try {
219
+ const raw = JSON.parse(content)
220
+ parsed = Array.isArray(raw) ? raw : raw.classifications || raw.results || Object.values(raw)[0] || []
221
+ if (!Array.isArray(parsed)) parsed = [parsed]
222
+ } catch {
223
+ // If parsing fails, skip this batch
224
+ continue
225
+ }
226
+
227
+ for (let j = 0; j < batch.length; j++) {
228
+ const msg = batch[j]
229
+ const cls = parsed[j] || {}
230
+ classifications.push({
231
+ messageIndex: msg.index,
232
+ messagePreview: msg.preview,
233
+ messageType: cls.message_type || 'unknown',
234
+ role: cls.role || 'unknown',
235
+ skillLevel: cls.skill_level || 'unknown',
236
+ sentiment: cls.sentiment || 'unknown',
237
+ })
238
+ }
239
+ }
240
+
241
+ const costUSD =
242
+ (totalInputTokens / 1_000_000) * INPUT_PRICE_PER_M +
243
+ (totalOutputTokens / 1_000_000) * OUTPUT_PRICE_PER_M
244
+
245
+ return {
246
+ classifications,
247
+ model: MODEL,
248
+ totalMessages: messages.length,
249
+ inputTokens: totalInputTokens,
250
+ outputTokens: totalOutputTokens,
251
+ costUSD,
252
+ }
253
+ }
@@ -156,6 +156,7 @@ export function parseCodexSession(filePath: string): SessionSummary | null {
156
156
  totalTokens,
157
157
  costUSD: 0, // Codex doesn't expose per-session costs in logs
158
158
  model: model || 'gpt-5',
159
+ modelCounts: model ? { [model]: assistantMessages } : { 'gpt-5': assistantMessages },
159
160
  toolCalls,
160
161
  toolCallsTotal: Object.values(toolCalls).reduce((a, b) => a + b, 0),
161
162
  skillCalls: {},
@@ -164,6 +165,7 @@ export function parseCodexSession(filePath: string): SessionSummary | null {
164
165
  userInterruptions: 0,
165
166
  systemPromptEdits: 0,
166
167
  permissionModes: {},
168
+ cliVersion: 'codex',
167
169
  }
168
170
  } catch {
169
171
  return null
package/lib/parse-logs.ts CHANGED
@@ -25,6 +25,7 @@ export interface SessionSummary {
25
25
  totalTokens: number
26
26
  costUSD: number
27
27
  model: string
28
+ modelCounts: Record<string, number> // per-message model counts
28
29
  toolCalls: Record<string, number>
29
30
  toolCallsTotal: number
30
31
  skillCalls: Record<string, number>
@@ -34,6 +35,7 @@ export interface SessionSummary {
34
35
  userInterruptions: number
35
36
  permissionModes: Record<string, number> // default, acceptEdits, bypassPermissions, plan
36
37
  systemPromptEdits: number // edits/writes to CLAUDE.md, AGENTS.md, agent.md
38
+ cliVersion: string // Claude Code CLI version from JSONL logs
37
39
  }
38
40
 
39
41
  export interface ProjectSummary {
@@ -125,6 +127,7 @@ function getProjectName(projectPath: string): string {
125
127
 
126
128
  interface LogEntry {
127
129
  type?: string
130
+ version?: string
128
131
  message?: {
129
132
  role?: string
130
133
  content?: unknown[]
@@ -155,12 +158,14 @@ function parseSessionFile(
155
158
  let cacheCreationTokens = 0
156
159
  let cacheReadTokens = 0
157
160
  let costUSD = 0
158
- let model = ''
161
+ let currentModel = '' // tracks model for cost calculation per message
162
+ const modelCounts: Record<string, number> = {} // count messages per model
159
163
  let startTime = ''
160
164
  let endTime = ''
161
165
  const toolCalls: Record<string, number> = {}
162
166
  const permissionModes: Record<string, number> = {}
163
167
  let systemPromptEdits = 0
168
+ let cliVersion = ''
164
169
 
165
170
  for (const line of lines) {
166
171
  if (!line.trim()) continue
@@ -177,6 +182,10 @@ function parseSessionFile(
177
182
  endTime = entry.timestamp
178
183
  }
179
184
 
185
+ if (entry.version && !cliVersion) {
186
+ cliVersion = entry.version
187
+ }
188
+
180
189
  if (entry.type === 'user') {
181
190
  userMessages++
182
191
  // Track permission mode
@@ -190,7 +199,8 @@ function parseSessionFile(
190
199
 
191
200
  // Model
192
201
  if (msg.model && msg.model !== '<synthetic>') {
193
- model = msg.model
202
+ currentModel = msg.model
203
+ modelCounts[msg.model] = (modelCounts[msg.model] || 0) + 1
194
204
  }
195
205
 
196
206
  // Usage
@@ -201,8 +211,8 @@ function parseSessionFile(
201
211
  cacheCreationTokens += u.cache_creation_input_tokens || 0
202
212
  cacheReadTokens += u.cache_read_input_tokens || 0
203
213
 
204
- if (model) {
205
- costUSD += calculateCost(model, u, allPricing)
214
+ if (currentModel) {
215
+ costUSD += calculateCost(currentModel, u, allPricing)
206
216
  }
207
217
  }
208
218
 
@@ -258,7 +268,8 @@ function parseSessionFile(
258
268
  cacheReadTokens,
259
269
  totalTokens: inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens,
260
270
  costUSD,
261
- model: model || 'unknown',
271
+ model: Object.entries(modelCounts).sort((a, b) => b[1] - a[1])[0]?.[0] || 'unknown',
272
+ modelCounts,
262
273
  toolCalls,
263
274
  toolCallsTotal: Object.values(toolCalls).reduce((a, b) => a + b, 0),
264
275
  skillCalls: {},
@@ -267,6 +278,7 @@ function parseSessionFile(
267
278
  userInterruptions: 0,
268
279
  permissionModes,
269
280
  systemPromptEdits,
281
+ cliVersion: cliVersion || 'unknown',
270
282
  }
271
283
  } catch {
272
284
  return null
@@ -384,10 +396,12 @@ export async function parseAllLogs(): Promise<UsageData> {
384
396
  // Build projects array sorted by cost desc
385
397
  const projects = Array.from(projectMap.values()).sort((a, b) => b.totalCost - a.totalCost)
386
398
 
387
- // Build overview
399
+ // Build overview — aggregate model counts at message level
388
400
  const models: Record<string, number> = {}
389
401
  for (const s of sessions) {
390
- models[s.model] = (models[s.model] || 0) + 1
402
+ for (const [m, count] of Object.entries(s.modelCounts)) {
403
+ models[m] = (models[m] || 0) + count
404
+ }
391
405
  }
392
406
 
393
407
  const overview: OverviewStats = {
package/lib/queries.ts CHANGED
@@ -43,6 +43,8 @@ export async function getUsageData(): Promise<UsageData> {
43
43
  userInterruptions: s.userInterruptions,
44
44
  permissionModes: JSON.parse(s.permissionModesJson || '{}') as Record<string, number>,
45
45
  systemPromptEdits: s.systemPromptEdits,
46
+ cliVersion: s.cliVersion,
47
+ modelCounts: JSON.parse(s.modelCountsJson || '{}') as Record<string, number>,
46
48
  }))
47
49
 
48
50
  // Aggregate projects
@@ -126,7 +128,9 @@ export async function getUsageData(): Promise<UsageData> {
126
128
  const skillUsage: Record<string, number> = {}
127
129
  const permissionModes: Record<string, number> = {}
128
130
  for (const s of sessions) {
129
- models[s.model] = (models[s.model] || 0) + 1
131
+ for (const [m, count] of Object.entries(s.modelCounts)) {
132
+ models[m] = (models[m] || 0) + count
133
+ }
130
134
  for (const [skill, count] of Object.entries(s.skillCalls)) {
131
135
  skillUsage[skill] = (skillUsage[skill] || 0) + count
132
136
  }
package/lib/sync.ts CHANGED
@@ -10,6 +10,7 @@ const IMAGES_DIR = path.resolve(process.cwd(), 'data', 'images')
10
10
  interface LogEntry {
11
11
  type?: string
12
12
  uuid?: string
13
+ version?: string
13
14
  message?: {
14
15
  role?: string
15
16
  content?: unknown[]
@@ -73,7 +74,8 @@ function parseSessionFile(
73
74
  let cacheCreationTokens = 0
74
75
  let cacheReadTokens = 0
75
76
  let costUSD = 0
76
- let model = ''
77
+ let currentModel = '' // tracks model for cost calculation per message
78
+ const modelCounts: Record<string, number> = {} // count messages per model
77
79
  let startTime = ''
78
80
  let endTime = ''
79
81
  const toolCalls: Record<string, number> = {}
@@ -85,6 +87,7 @@ function parseSessionFile(
85
87
  const skillCalls: Record<string, number> = {}
86
88
  const permissionModes: Record<string, number> = {}
87
89
  let systemPromptEdits = 0
90
+ let cliVersion = ''
88
91
 
89
92
  for (const line of lines) {
90
93
  if (!line.trim()) continue
@@ -101,6 +104,10 @@ function parseSessionFile(
101
104
  messageTimestamps.push(entry.timestamp)
102
105
  }
103
106
 
107
+ if (entry.version && !cliVersion) {
108
+ cliVersion = entry.version
109
+ }
110
+
104
111
  const entryType = entry.type
105
112
  const msg = entry.message
106
113
 
@@ -145,7 +152,8 @@ function parseSessionFile(
145
152
  }
146
153
 
147
154
  if (msg.model && msg.model !== '<synthetic>') {
148
- model = msg.model
155
+ currentModel = msg.model
156
+ modelCounts[msg.model] = (modelCounts[msg.model] || 0) + 1
149
157
  }
150
158
 
151
159
  if (msg.usage) {
@@ -154,8 +162,8 @@ function parseSessionFile(
154
162
  outputTokens += u.output_tokens || 0
155
163
  cacheCreationTokens += u.cache_creation_input_tokens || 0
156
164
  cacheReadTokens += u.cache_read_input_tokens || 0
157
- if (model) {
158
- costUSD += calculateCost(model, u, allPricing)
165
+ if (currentModel) {
166
+ costUSD += calculateCost(currentModel, u, allPricing)
159
167
  }
160
168
  }
161
169
  }
@@ -242,7 +250,8 @@ function parseSessionFile(
242
250
  cacheReadTokens,
243
251
  totalTokens: inputTokens + outputTokens + cacheCreationTokens + cacheReadTokens,
244
252
  costUSD,
245
- model: model || 'unknown',
253
+ model: Object.entries(modelCounts).sort((a, b) => b[1] - a[1])[0]?.[0] || 'unknown',
254
+ modelCounts,
246
255
  toolCalls,
247
256
  toolCallsTotal: Object.values(toolCalls).reduce((a, b) => a + b, 0),
248
257
  messageTimestamps,
@@ -252,6 +261,7 @@ function parseSessionFile(
252
261
  skillCalls,
253
262
  permissionModes,
254
263
  systemPromptEdits,
264
+ cliVersion: cliVersion || 'unknown',
255
265
  images,
256
266
  }
257
267
  }
@@ -350,6 +360,8 @@ export async function syncLogs(): Promise<SyncResult> {
350
360
  userInterruptions: parsed.userInterruptions,
351
361
  permissionModesJson: JSON.stringify(parsed.permissionModes),
352
362
  systemPromptEdits: parsed.systemPromptEdits,
363
+ cliVersion: parsed.cliVersion,
364
+ modelCountsJson: JSON.stringify(parsed.modelCounts),
353
365
  },
354
366
  })
355
367
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agentfit",
3
- "version": "0.1.3",
3
+ "version": "0.1.6",
4
4
  "description": "Fitness tracker dashboard for AI coding agents (Claude Code, Codex). Visualize usage, cost, tokens, and productivity from local conversation logs.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -55,6 +55,7 @@
55
55
  "lucide-react": "^1.7.0",
56
56
  "next": "16.1.7",
57
57
  "next-themes": "^0.4.6",
58
+ "openai": "^6.33.0",
58
59
  "prisma": "^7.6.0",
59
60
  "react": "^19.2.4",
60
61
  "react-dom": "^19.2.4",
@@ -0,0 +1,18 @@
1
+ -- CreateTable
2
+ CREATE TABLE "SessionAnalysis" (
3
+ "id" TEXT NOT NULL PRIMARY KEY,
4
+ "sessionId" TEXT NOT NULL,
5
+ "model" TEXT NOT NULL,
6
+ "classifications" TEXT NOT NULL,
7
+ "totalMessages" INTEGER NOT NULL,
8
+ "inputTokens" INTEGER NOT NULL,
9
+ "outputTokens" INTEGER NOT NULL,
10
+ "costUSD" REAL NOT NULL,
11
+ "analyzedAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
12
+ );
13
+
14
+ -- CreateIndex
15
+ CREATE UNIQUE INDEX "SessionAnalysis_sessionId_key" ON "SessionAnalysis"("sessionId");
16
+
17
+ -- CreateIndex
18
+ CREATE INDEX "SessionAnalysis_sessionId_idx" ON "SessionAnalysis"("sessionId");
@@ -0,0 +1,41 @@
1
+ -- RedefineTables
2
+ PRAGMA defer_foreign_keys=ON;
3
+ PRAGMA foreign_keys=OFF;
4
+ CREATE TABLE "new_Session" (
5
+ "id" TEXT NOT NULL PRIMARY KEY,
6
+ "sessionId" TEXT NOT NULL,
7
+ "project" TEXT NOT NULL,
8
+ "projectPath" TEXT NOT NULL,
9
+ "startTime" DATETIME NOT NULL,
10
+ "endTime" DATETIME NOT NULL,
11
+ "durationMinutes" REAL NOT NULL,
12
+ "userMessages" INTEGER NOT NULL,
13
+ "assistantMessages" INTEGER NOT NULL,
14
+ "totalMessages" INTEGER NOT NULL,
15
+ "inputTokens" INTEGER NOT NULL,
16
+ "outputTokens" INTEGER NOT NULL,
17
+ "cacheCreationTokens" INTEGER NOT NULL,
18
+ "cacheReadTokens" INTEGER NOT NULL,
19
+ "totalTokens" INTEGER NOT NULL,
20
+ "costUSD" REAL NOT NULL,
21
+ "model" TEXT NOT NULL,
22
+ "toolCallsTotal" INTEGER NOT NULL,
23
+ "toolCallsJson" TEXT NOT NULL,
24
+ "skillCallsJson" TEXT NOT NULL DEFAULT '{}',
25
+ "messageTimestamps" TEXT NOT NULL DEFAULT '[]',
26
+ "apiErrors" INTEGER NOT NULL DEFAULT 0,
27
+ "rateLimitErrors" INTEGER NOT NULL DEFAULT 0,
28
+ "userInterruptions" INTEGER NOT NULL DEFAULT 0,
29
+ "permissionModesJson" TEXT NOT NULL DEFAULT '{}',
30
+ "systemPromptEdits" INTEGER NOT NULL DEFAULT 0,
31
+ "cliVersion" TEXT NOT NULL DEFAULT 'unknown',
32
+ "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
33
+ );
34
+ INSERT INTO "new_Session" ("apiErrors", "assistantMessages", "cacheCreationTokens", "cacheReadTokens", "costUSD", "createdAt", "durationMinutes", "endTime", "id", "inputTokens", "messageTimestamps", "model", "outputTokens", "permissionModesJson", "project", "projectPath", "rateLimitErrors", "sessionId", "skillCallsJson", "startTime", "systemPromptEdits", "toolCallsJson", "toolCallsTotal", "totalMessages", "totalTokens", "userInterruptions", "userMessages") SELECT "apiErrors", "assistantMessages", "cacheCreationTokens", "cacheReadTokens", "costUSD", "createdAt", "durationMinutes", "endTime", "id", "inputTokens", "messageTimestamps", "model", "outputTokens", "permissionModesJson", "project", "projectPath", "rateLimitErrors", "sessionId", "skillCallsJson", "startTime", "systemPromptEdits", "toolCallsJson", "toolCallsTotal", "totalMessages", "totalTokens", "userInterruptions", "userMessages" FROM "Session";
35
+ DROP TABLE "Session";
36
+ ALTER TABLE "new_Session" RENAME TO "Session";
37
+ CREATE UNIQUE INDEX "Session_sessionId_key" ON "Session"("sessionId");
38
+ CREATE INDEX "Session_project_idx" ON "Session"("project");
39
+ CREATE INDEX "Session_startTime_idx" ON "Session"("startTime");
40
+ PRAGMA foreign_keys=ON;
41
+ PRAGMA defer_foreign_keys=OFF;
@@ -0,0 +1,42 @@
1
+ -- RedefineTables
2
+ PRAGMA defer_foreign_keys=ON;
3
+ PRAGMA foreign_keys=OFF;
4
+ CREATE TABLE "new_Session" (
5
+ "id" TEXT NOT NULL PRIMARY KEY,
6
+ "sessionId" TEXT NOT NULL,
7
+ "project" TEXT NOT NULL,
8
+ "projectPath" TEXT NOT NULL,
9
+ "startTime" DATETIME NOT NULL,
10
+ "endTime" DATETIME NOT NULL,
11
+ "durationMinutes" REAL NOT NULL,
12
+ "userMessages" INTEGER NOT NULL,
13
+ "assistantMessages" INTEGER NOT NULL,
14
+ "totalMessages" INTEGER NOT NULL,
15
+ "inputTokens" INTEGER NOT NULL,
16
+ "outputTokens" INTEGER NOT NULL,
17
+ "cacheCreationTokens" INTEGER NOT NULL,
18
+ "cacheReadTokens" INTEGER NOT NULL,
19
+ "totalTokens" INTEGER NOT NULL,
20
+ "costUSD" REAL NOT NULL,
21
+ "model" TEXT NOT NULL,
22
+ "toolCallsTotal" INTEGER NOT NULL,
23
+ "toolCallsJson" TEXT NOT NULL,
24
+ "skillCallsJson" TEXT NOT NULL DEFAULT '{}',
25
+ "messageTimestamps" TEXT NOT NULL DEFAULT '[]',
26
+ "apiErrors" INTEGER NOT NULL DEFAULT 0,
27
+ "rateLimitErrors" INTEGER NOT NULL DEFAULT 0,
28
+ "userInterruptions" INTEGER NOT NULL DEFAULT 0,
29
+ "permissionModesJson" TEXT NOT NULL DEFAULT '{}',
30
+ "systemPromptEdits" INTEGER NOT NULL DEFAULT 0,
31
+ "cliVersion" TEXT NOT NULL DEFAULT 'unknown',
32
+ "modelCountsJson" TEXT NOT NULL DEFAULT '{}',
33
+ "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
34
+ );
35
+ INSERT INTO "new_Session" ("apiErrors", "assistantMessages", "cacheCreationTokens", "cacheReadTokens", "cliVersion", "costUSD", "createdAt", "durationMinutes", "endTime", "id", "inputTokens", "messageTimestamps", "model", "outputTokens", "permissionModesJson", "project", "projectPath", "rateLimitErrors", "sessionId", "skillCallsJson", "startTime", "systemPromptEdits", "toolCallsJson", "toolCallsTotal", "totalMessages", "totalTokens", "userInterruptions", "userMessages") SELECT "apiErrors", "assistantMessages", "cacheCreationTokens", "cacheReadTokens", "cliVersion", "costUSD", "createdAt", "durationMinutes", "endTime", "id", "inputTokens", "messageTimestamps", "model", "outputTokens", "permissionModesJson", "project", "projectPath", "rateLimitErrors", "sessionId", "skillCallsJson", "startTime", "systemPromptEdits", "toolCallsJson", "toolCallsTotal", "totalMessages", "totalTokens", "userInterruptions", "userMessages" FROM "Session";
36
+ DROP TABLE "Session";
37
+ ALTER TABLE "new_Session" RENAME TO "Session";
38
+ CREATE UNIQUE INDEX "Session_sessionId_key" ON "Session"("sessionId");
39
+ CREATE INDEX "Session_project_idx" ON "Session"("project");
40
+ CREATE INDEX "Session_startTime_idx" ON "Session"("startTime");
41
+ PRAGMA foreign_keys=ON;
42
+ PRAGMA defer_foreign_keys=OFF;
@@ -34,6 +34,8 @@ model Session {
34
34
  userInterruptions Int @default(0)
35
35
  permissionModesJson String @default("{}") // JSON: {default:N, acceptEdits:N, bypassPermissions:N, plan:N}
36
36
  systemPromptEdits Int @default(0) // Edits/writes to CLAUDE.md, AGENTS.md, agent.md
37
+ cliVersion String @default("unknown") // Claude Code CLI version from JSONL logs
38
+ modelCountsJson String @default("{}") // JSON: {model_name: message_count}
37
39
  createdAt DateTime @default(now())
38
40
 
39
41
  @@index([project])
@@ -73,3 +75,17 @@ model Report {
73
75
 
74
76
  @@index([generatedAt])
75
77
  }
78
+
79
// One AI-analysis record per session: per-message classifications are
// serialized as a JSON array, plus token/cost accounting for the run.
model SessionAnalysis {
  id String @id @default(cuid())
  sessionId String @unique // one analysis per session
  model String // OpenAI model used for the classification run
  classifications String // JSON array of per-message classifications
  totalMessages Int // number of user messages classified
  inputTokens Int // prompt tokens consumed
  outputTokens Int // completion tokens consumed
  costUSD Float // API cost of the analysis
  analyzedAt DateTime @default(now())

  // NOTE(review): redundant — @unique on sessionId already creates an
  // index; consider dropping this in a future migration.
  @@index([sessionId])
}