@nextsparkjs/plugin-langchain 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +41 -0
- package/api/observability/metrics/route.ts +110 -0
- package/api/observability/traces/[traceId]/route.ts +398 -0
- package/api/observability/traces/route.ts +205 -0
- package/api/sessions/route.ts +332 -0
- package/components/observability/CollapsibleJson.tsx +71 -0
- package/components/observability/CompactTimeline.tsx +75 -0
- package/components/observability/ConversationFlow.tsx +271 -0
- package/components/observability/DisabledMessage.tsx +21 -0
- package/components/observability/FiltersPanel.tsx +82 -0
- package/components/observability/ObservabilityDashboard.tsx +230 -0
- package/components/observability/SpansList.tsx +210 -0
- package/components/observability/TraceDetail.tsx +335 -0
- package/components/observability/TraceStatusBadge.tsx +39 -0
- package/components/observability/TracesTable.tsx +97 -0
- package/components/observability/index.ts +7 -0
- package/docs/01-getting-started/01-overview.md +196 -0
- package/docs/01-getting-started/02-installation.md +368 -0
- package/docs/01-getting-started/03-configuration.md +794 -0
- package/docs/02-core-concepts/01-architecture.md +566 -0
- package/docs/02-core-concepts/02-agents.md +597 -0
- package/docs/02-core-concepts/03-tools.md +689 -0
- package/docs/03-orchestration/01-graph-orchestrator.md +809 -0
- package/docs/03-orchestration/02-legacy-react.md +650 -0
- package/docs/04-advanced/01-observability.md +645 -0
- package/docs/04-advanced/02-token-tracking.md +469 -0
- package/docs/04-advanced/03-streaming.md +476 -0
- package/docs/04-advanced/04-guardrails.md +597 -0
- package/docs/05-reference/01-api-reference.md +1403 -0
- package/docs/05-reference/02-customization.md +646 -0
- package/docs/05-reference/03-examples.md +881 -0
- package/docs/index.md +85 -0
- package/hooks/observability/useMetrics.ts +31 -0
- package/hooks/observability/useTraceDetail.ts +48 -0
- package/hooks/observability/useTraces.ts +59 -0
- package/lib/agent-factory.ts +354 -0
- package/lib/agent-helpers.ts +201 -0
- package/lib/db-memory-store.ts +417 -0
- package/lib/graph/index.ts +58 -0
- package/lib/graph/nodes/combiner.ts +399 -0
- package/lib/graph/nodes/router.ts +440 -0
- package/lib/graph/orchestrator-graph.ts +386 -0
- package/lib/graph/prompts/combiner.md +131 -0
- package/lib/graph/prompts/router.md +193 -0
- package/lib/graph/types.ts +365 -0
- package/lib/guardrails.ts +230 -0
- package/lib/index.ts +44 -0
- package/lib/logger.ts +70 -0
- package/lib/memory-store.ts +168 -0
- package/lib/message-serializer.ts +110 -0
- package/lib/prompt-renderer.ts +94 -0
- package/lib/providers.ts +226 -0
- package/lib/streaming.ts +232 -0
- package/lib/token-tracker.ts +298 -0
- package/lib/tools-builder.ts +192 -0
- package/lib/tracer-callbacks.ts +342 -0
- package/lib/tracer.ts +350 -0
- package/migrations/001_langchain_memory.sql +83 -0
- package/migrations/002_token_usage.sql +127 -0
- package/migrations/003_observability.sql +257 -0
- package/package.json +28 -0
- package/plugin.config.ts +170 -0
- package/presets/lib/langchain.config.ts.preset +142 -0
- package/presets/templates/sector7/ai-observability/[traceId]/page.tsx +91 -0
- package/presets/templates/sector7/ai-observability/page.tsx +54 -0
- package/types/langchain.types.ts +274 -0
- package/types/observability.types.ts +270 -0
package/lib/streaming.ts
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Streaming Service
|
|
3
|
+
*
|
|
4
|
+
* Provides token-by-token streaming via AsyncGenerator.
|
|
5
|
+
* Integrates with LangChain's streamEvents() method.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
|
|
9
|
+
import type { AgentContext, SessionConfig } from '../types/langchain.types'
|
|
10
|
+
import { dbMemoryStore } from './db-memory-store'
|
|
11
|
+
import { tokenTracker } from './token-tracker'
|
|
12
|
+
import { tracer } from './tracer'
|
|
13
|
+
import { createTracingCallbacks } from './tracer-callbacks'
|
|
14
|
+
|
|
15
|
+
// Stream chunk types
/**
 * Discriminated union of events yielded by `streamChat`.
 * Consumers switch on `type`: incremental 'token' chunks, tool lifecycle
 * events, and a terminal 'done' (success) or 'error' chunk.
 */
export type StreamChunk =
  | { type: 'token'; content: string }
  | { type: 'done'; fullContent: string; agentUsed?: string; tokenUsage?: TokenUsage }
  | { type: 'error'; error: string }
  | { type: 'tool_start'; toolName: string }
  | { type: 'tool_end'; toolName: string; result: unknown }

/** Token counts for a single streamed exchange (input + output = total). */
interface TokenUsage {
  inputTokens: number
  outputTokens: number
  totalTokens: number
}

/** Options accepted by `streamChat`. */
export interface StreamChatOptions {
  sessionId?: string                               // attach the turn to a persisted chat session
  sessionConfig?: SessionConfig                    // forwarded to the memory store on persist
  agentName?: string                               // label used for tracing and usage attribution
  onToken?: (token: string) => void                // observer for each streamed token
  onToolCall?: (name: string, input: unknown) => void // observer for tool invocations
  signal?: AbortSignal // For cancellation
}

/**
 * Minimal slice of agent configuration read by `streamChat`
 * (provider/model are used for token-cost attribution).
 */
interface AgentConfig {
  modelConfig?: {
    provider?: string
    model?: string
  }
}
+
/**
|
|
46
|
+
* Stream chat with an agent
|
|
47
|
+
*
|
|
48
|
+
* Uses LangChain's streamEvents() for token-by-token streaming.
|
|
49
|
+
* Handles memory persistence and token tracking.
|
|
50
|
+
*/
|
|
51
|
+
export async function* streamChat(
|
|
52
|
+
agent: { streamEvents: Function; invoke: Function },
|
|
53
|
+
input: string,
|
|
54
|
+
context: AgentContext,
|
|
55
|
+
config: AgentConfig,
|
|
56
|
+
options: StreamChatOptions = {}
|
|
57
|
+
): AsyncGenerator<StreamChunk, void, unknown> {
|
|
58
|
+
const { sessionId, sessionConfig, agentName, signal } = options
|
|
59
|
+
|
|
60
|
+
let fullContent = ''
|
|
61
|
+
let tokenUsage: TokenUsage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
|
|
62
|
+
|
|
63
|
+
// Start trace if context exists
|
|
64
|
+
const traceContext = await tracer.startTrace(
|
|
65
|
+
{ userId: context.userId, teamId: context.teamId },
|
|
66
|
+
agentName || 'StreamAgent',
|
|
67
|
+
input,
|
|
68
|
+
{ sessionId }
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
// Create tracing callbacks if trace was started
|
|
72
|
+
const tracingHandler = traceContext
|
|
73
|
+
? createTracingCallbacks({ userId: context.userId, teamId: context.teamId }, traceContext.traceId)
|
|
74
|
+
: null
|
|
75
|
+
const tracingCallbacks = tracingHandler ? [tracingHandler] : []
|
|
76
|
+
|
|
77
|
+
try {
|
|
78
|
+
// Get history if session exists
|
|
79
|
+
let history: BaseMessage[] = []
|
|
80
|
+
if (sessionId) {
|
|
81
|
+
history = await dbMemoryStore.getMessages(sessionId, context)
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
// Create input with history
|
|
85
|
+
const messages = [...history, new HumanMessage(input)]
|
|
86
|
+
|
|
87
|
+
// Stream events from LangChain with tracing callbacks
|
|
88
|
+
const stream = agent.streamEvents(
|
|
89
|
+
{ messages },
|
|
90
|
+
{
|
|
91
|
+
version: 'v2',
|
|
92
|
+
callbacks: tracingCallbacks,
|
|
93
|
+
}
|
|
94
|
+
)
|
|
95
|
+
|
|
96
|
+
for await (const event of stream) {
|
|
97
|
+
// Check for cancellation
|
|
98
|
+
if (signal?.aborted) {
|
|
99
|
+
yield { type: 'error', error: 'Stream cancelled by user' }
|
|
100
|
+
return
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
// Handle different event types
|
|
104
|
+
if (event.event === 'on_chat_model_stream') {
|
|
105
|
+
const token = event.data?.chunk?.content
|
|
106
|
+
if (token && typeof token === 'string') {
|
|
107
|
+
fullContent += token
|
|
108
|
+
yield { type: 'token', content: token }
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
if (event.event === 'on_tool_start') {
|
|
113
|
+
yield {
|
|
114
|
+
type: 'tool_start',
|
|
115
|
+
toolName: event.name || 'unknown',
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
if (event.event === 'on_tool_end') {
|
|
120
|
+
yield {
|
|
121
|
+
type: 'tool_end',
|
|
122
|
+
toolName: event.name || 'unknown',
|
|
123
|
+
result: event.data?.output,
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Capture token usage from LLM events
|
|
128
|
+
if (event.event === 'on_llm_end') {
|
|
129
|
+
const usage = event.data?.output?.usage_metadata ||
|
|
130
|
+
event.data?.output?.llmOutput?.tokenUsage
|
|
131
|
+
if (usage) {
|
|
132
|
+
tokenUsage = {
|
|
133
|
+
inputTokens: usage.input_tokens || usage.promptTokens || 0,
|
|
134
|
+
outputTokens: usage.output_tokens || usage.completionTokens || 0,
|
|
135
|
+
totalTokens: usage.total_tokens || usage.totalTokens || 0,
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
// Persist to memory if session exists
|
|
142
|
+
if (sessionId && fullContent) {
|
|
143
|
+
await dbMemoryStore.addMessages(
|
|
144
|
+
sessionId,
|
|
145
|
+
[new HumanMessage(input), new AIMessage(fullContent)],
|
|
146
|
+
context,
|
|
147
|
+
sessionConfig
|
|
148
|
+
)
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// Track token usage
|
|
152
|
+
if (tokenUsage.totalTokens > 0) {
|
|
153
|
+
await tokenTracker.trackUsage({
|
|
154
|
+
context,
|
|
155
|
+
sessionId,
|
|
156
|
+
provider: config.modelConfig?.provider || 'unknown',
|
|
157
|
+
model: config.modelConfig?.model || 'unknown',
|
|
158
|
+
usage: tokenUsage,
|
|
159
|
+
agentName,
|
|
160
|
+
})
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// End trace successfully if started
|
|
164
|
+
if (traceContext) {
|
|
165
|
+
// Flush pending operations and get call counts from tracing handler
|
|
166
|
+
await tracingHandler?.flush()
|
|
167
|
+
const counts = tracingHandler?.getCounts() || { llmCalls: 0, toolCalls: 0 }
|
|
168
|
+
|
|
169
|
+
await tracer.endTrace(
|
|
170
|
+
{ userId: context.userId, teamId: context.teamId },
|
|
171
|
+
traceContext.traceId,
|
|
172
|
+
{
|
|
173
|
+
output: fullContent,
|
|
174
|
+
tokens: tokenUsage.totalTokens > 0
|
|
175
|
+
? {
|
|
176
|
+
input: tokenUsage.inputTokens,
|
|
177
|
+
output: tokenUsage.outputTokens,
|
|
178
|
+
total: tokenUsage.totalTokens,
|
|
179
|
+
}
|
|
180
|
+
: undefined,
|
|
181
|
+
llmCalls: counts.llmCalls,
|
|
182
|
+
toolCalls: counts.toolCalls,
|
|
183
|
+
}
|
|
184
|
+
)
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
yield {
|
|
188
|
+
type: 'done',
|
|
189
|
+
fullContent,
|
|
190
|
+
agentUsed: agentName,
|
|
191
|
+
tokenUsage,
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
} catch (error) {
|
|
195
|
+
// End trace with error if started
|
|
196
|
+
if (traceContext) {
|
|
197
|
+
// Flush pending operations and get call counts from tracing handler (even on error)
|
|
198
|
+
await tracingHandler?.flush()
|
|
199
|
+
const counts = tracingHandler?.getCounts() || { llmCalls: 0, toolCalls: 0 }
|
|
200
|
+
|
|
201
|
+
await tracer.endTrace(
|
|
202
|
+
{ userId: context.userId, teamId: context.teamId },
|
|
203
|
+
traceContext.traceId,
|
|
204
|
+
{
|
|
205
|
+
error: error instanceof Error ? error : new Error(String(error)),
|
|
206
|
+
llmCalls: counts.llmCalls,
|
|
207
|
+
toolCalls: counts.toolCalls,
|
|
208
|
+
}
|
|
209
|
+
)
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
const message = error instanceof Error ? error.message : 'Unknown streaming error'
|
|
213
|
+
yield { type: 'error', error: message }
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
/**
|
|
218
|
+
* Create SSE encoder for streaming responses
|
|
219
|
+
*/
|
|
220
|
+
export function createSSEEncoder() {
|
|
221
|
+
const encoder = new TextEncoder()
|
|
222
|
+
|
|
223
|
+
return {
|
|
224
|
+
encode(chunk: StreamChunk): Uint8Array {
|
|
225
|
+
return encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`)
|
|
226
|
+
},
|
|
227
|
+
|
|
228
|
+
encodeDone(): Uint8Array {
|
|
229
|
+
return encoder.encode('data: [DONE]\n\n')
|
|
230
|
+
},
|
|
231
|
+
}
|
|
232
|
+
}
|
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Token Tracker Service
|
|
3
|
+
*
|
|
4
|
+
* Tracks token usage and calculates costs per request.
|
|
5
|
+
* Integrates with LangChain callbacks for automatic tracking.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { mutateWithRLS, queryWithRLS } from '@nextsparkjs/core/lib/db'
|
|
9
|
+
import type { AgentContext } from '../types/langchain.types'
|
|
10
|
+
|
|
11
|
+
/** Token counts for a single model request (input + output = total). */
interface TokenUsage {
  inputTokens: number
  outputTokens: number
  totalTokens: number
}

/** Input to `tokenTracker.trackUsage`. */
interface TrackUsageParams {
  context: AgentContext              // user/team scoping (RLS)
  sessionId?: string                 // optional chat session this request belongs to
  provider: string                   // provider identifier, stored as-is
  model: string                      // model identifier; also used for pricing lookup
  usage: TokenUsage
  agentName?: string                 // optional agent attribution
  metadata?: Record<string, unknown> // free-form extras, stored as JSON
}

/** Aggregated usage figures returned by getUsage / getTeamUsage. */
interface UsageStats {
  totalTokens: number
  totalCost: number                  // USD (pricing table is USD per 1M tokens)
  inputTokens: number
  outputTokens: number
  requestCount: number
  byModel: Record<string, { tokens: number; cost: number }> // per-model breakdown
}

/** Reporting window for usage queries. */
type Period = 'today' | '7d' | '30d' | 'all'
|
|
38
|
+
// Pricing per 1M tokens (USD)
// Keys are either exact model names or provider wildcards ('<provider>/*').
// NOTE(review): list prices drift over time — verify against the providers'
// current pricing pages before relying on these figures for billing.
const DEFAULT_PRICING: Record<string, { input: number; output: number }> = {
  'gpt-4o': { input: 5.00, output: 15.00 },
  'gpt-4o-mini': { input: 0.15, output: 0.60 },
  'gpt-4-turbo': { input: 10.00, output: 30.00 },
  'gpt-3.5-turbo': { input: 0.50, output: 1.50 },
  'claude-3-5-sonnet': { input: 3.00, output: 15.00 },
  'claude-3-opus': { input: 15.00, output: 75.00 },
  'claude-3-haiku': { input: 0.25, output: 1.25 },
  // Ollama models are free (local)
  'ollama/*': { input: 0, output: 0 },
}
|
|
51
|
+
export const tokenTracker = {
|
|
52
|
+
/**
|
|
53
|
+
* Calculate cost for token usage
|
|
54
|
+
*/
|
|
55
|
+
calculateCost(model: string, usage: TokenUsage, customPricing?: typeof DEFAULT_PRICING): {
|
|
56
|
+
inputCost: number
|
|
57
|
+
outputCost: number
|
|
58
|
+
totalCost: number
|
|
59
|
+
} {
|
|
60
|
+
const pricing = customPricing || DEFAULT_PRICING
|
|
61
|
+
|
|
62
|
+
// Check for exact match first, then wildcard
|
|
63
|
+
let modelPricing = pricing[model]
|
|
64
|
+
if (!modelPricing) {
|
|
65
|
+
// Check for provider wildcard (e.g., 'ollama/*')
|
|
66
|
+
const provider = model.split('/')[0] || model.split('-')[0]
|
|
67
|
+
modelPricing = pricing[`${provider}/*`] || { input: 0, output: 0 }
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
const inputCost = (usage.inputTokens / 1_000_000) * modelPricing.input
|
|
71
|
+
const outputCost = (usage.outputTokens / 1_000_000) * modelPricing.output
|
|
72
|
+
|
|
73
|
+
return {
|
|
74
|
+
inputCost,
|
|
75
|
+
outputCost,
|
|
76
|
+
totalCost: inputCost + outputCost,
|
|
77
|
+
}
|
|
78
|
+
},
|
|
79
|
+
|
|
80
|
+
/**
|
|
81
|
+
* Track token usage for a request
|
|
82
|
+
*/
|
|
83
|
+
async trackUsage(params: TrackUsageParams): Promise<void> {
|
|
84
|
+
const { context, sessionId, provider, model, usage, agentName, metadata } = params
|
|
85
|
+
const { userId, teamId } = context
|
|
86
|
+
|
|
87
|
+
const costs = this.calculateCost(model, usage)
|
|
88
|
+
|
|
89
|
+
await mutateWithRLS(
|
|
90
|
+
`INSERT INTO public."langchain_token_usage"
|
|
91
|
+
(id, "userId", "teamId", "sessionId", provider, model,
|
|
92
|
+
"inputTokens", "outputTokens", "totalTokens",
|
|
93
|
+
"inputCost", "outputCost", "totalCost",
|
|
94
|
+
"agentName", metadata)
|
|
95
|
+
VALUES (gen_random_uuid()::text, $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`,
|
|
96
|
+
[
|
|
97
|
+
userId, teamId, sessionId || null, provider, model,
|
|
98
|
+
usage.inputTokens, usage.outputTokens, usage.totalTokens,
|
|
99
|
+
costs.inputCost, costs.outputCost, costs.totalCost,
|
|
100
|
+
agentName || null, JSON.stringify(metadata || {})
|
|
101
|
+
],
|
|
102
|
+
userId
|
|
103
|
+
)
|
|
104
|
+
},
|
|
105
|
+
|
|
106
|
+
/**
|
|
107
|
+
* Get usage statistics for a user
|
|
108
|
+
*/
|
|
109
|
+
async getUsage(context: AgentContext, period: Period = '30d'): Promise<UsageStats> {
|
|
110
|
+
const { userId, teamId } = context
|
|
111
|
+
|
|
112
|
+
const periodClause = this.getPeriodClause(period)
|
|
113
|
+
|
|
114
|
+
const result = await queryWithRLS<{
|
|
115
|
+
totalTokens: string
|
|
116
|
+
totalCost: string
|
|
117
|
+
inputTokens: string
|
|
118
|
+
outputTokens: string
|
|
119
|
+
requestCount: string
|
|
120
|
+
model: string
|
|
121
|
+
modelTokens: string
|
|
122
|
+
modelCost: string
|
|
123
|
+
}>(
|
|
124
|
+
`SELECT
|
|
125
|
+
SUM("totalTokens")::text as "totalTokens",
|
|
126
|
+
SUM("totalCost")::text as "totalCost",
|
|
127
|
+
SUM("inputTokens")::text as "inputTokens",
|
|
128
|
+
SUM("outputTokens")::text as "outputTokens",
|
|
129
|
+
COUNT(*)::text as "requestCount",
|
|
130
|
+
model,
|
|
131
|
+
SUM("totalTokens")::text as "modelTokens",
|
|
132
|
+
SUM("totalCost")::text as "modelCost"
|
|
133
|
+
FROM public."langchain_token_usage"
|
|
134
|
+
WHERE "userId" = $1 AND "teamId" = $2 ${periodClause}
|
|
135
|
+
GROUP BY model`,
|
|
136
|
+
[userId, teamId],
|
|
137
|
+
userId
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
if (!result.length) {
|
|
141
|
+
return {
|
|
142
|
+
totalTokens: 0,
|
|
143
|
+
totalCost: 0,
|
|
144
|
+
inputTokens: 0,
|
|
145
|
+
outputTokens: 0,
|
|
146
|
+
requestCount: 0,
|
|
147
|
+
byModel: {},
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// Aggregate results
|
|
152
|
+
const byModel: Record<string, { tokens: number; cost: number }> = {}
|
|
153
|
+
let totalTokens = 0
|
|
154
|
+
let totalCost = 0
|
|
155
|
+
let inputTokens = 0
|
|
156
|
+
let outputTokens = 0
|
|
157
|
+
let requestCount = 0
|
|
158
|
+
|
|
159
|
+
for (const row of result) {
|
|
160
|
+
byModel[row.model] = {
|
|
161
|
+
tokens: parseInt(row.modelTokens, 10),
|
|
162
|
+
cost: parseFloat(row.modelCost),
|
|
163
|
+
}
|
|
164
|
+
totalTokens += parseInt(row.totalTokens || '0', 10)
|
|
165
|
+
totalCost += parseFloat(row.totalCost || '0')
|
|
166
|
+
inputTokens += parseInt(row.inputTokens || '0', 10)
|
|
167
|
+
outputTokens += parseInt(row.outputTokens || '0', 10)
|
|
168
|
+
requestCount += parseInt(row.requestCount || '0', 10)
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
return {
|
|
172
|
+
totalTokens,
|
|
173
|
+
totalCost,
|
|
174
|
+
inputTokens,
|
|
175
|
+
outputTokens,
|
|
176
|
+
requestCount,
|
|
177
|
+
byModel,
|
|
178
|
+
}
|
|
179
|
+
},
|
|
180
|
+
|
|
181
|
+
/**
|
|
182
|
+
* Get daily usage for charts
|
|
183
|
+
*/
|
|
184
|
+
async getDailyUsage(context: AgentContext, days: number = 30): Promise<Array<{
|
|
185
|
+
date: string
|
|
186
|
+
tokens: number
|
|
187
|
+
cost: number
|
|
188
|
+
requests: number
|
|
189
|
+
}>> {
|
|
190
|
+
const { userId, teamId } = context
|
|
191
|
+
|
|
192
|
+
// Validate days parameter to prevent SQL injection (must be positive integer)
|
|
193
|
+
const safeDays = Math.max(1, Math.min(365, Math.floor(Number(days) || 30)))
|
|
194
|
+
|
|
195
|
+
const result = await queryWithRLS<{
|
|
196
|
+
date: string
|
|
197
|
+
tokens: string
|
|
198
|
+
cost: string
|
|
199
|
+
requests: string
|
|
200
|
+
}>(
|
|
201
|
+
`SELECT
|
|
202
|
+
DATE("createdAt")::text as date,
|
|
203
|
+
SUM("totalTokens")::text as tokens,
|
|
204
|
+
SUM("totalCost")::text as cost,
|
|
205
|
+
COUNT(*)::text as requests
|
|
206
|
+
FROM public."langchain_token_usage"
|
|
207
|
+
WHERE "userId" = $1 AND "teamId" = $2
|
|
208
|
+
AND "createdAt" >= now() - ($3 || ' days')::interval
|
|
209
|
+
GROUP BY DATE("createdAt")
|
|
210
|
+
ORDER BY date DESC`,
|
|
211
|
+
[userId, teamId, safeDays.toString()],
|
|
212
|
+
userId
|
|
213
|
+
)
|
|
214
|
+
|
|
215
|
+
return result.map(row => ({
|
|
216
|
+
date: row.date,
|
|
217
|
+
tokens: parseInt(row.tokens, 10),
|
|
218
|
+
cost: parseFloat(row.cost),
|
|
219
|
+
requests: parseInt(row.requests, 10),
|
|
220
|
+
}))
|
|
221
|
+
},
|
|
222
|
+
|
|
223
|
+
/**
|
|
224
|
+
* Get team usage (admin only)
|
|
225
|
+
*/
|
|
226
|
+
async getTeamUsage(teamId: string, period: Period = '30d'): Promise<UsageStats & {
|
|
227
|
+
byUser: Record<string, { tokens: number; cost: number }>
|
|
228
|
+
}> {
|
|
229
|
+
const periodClause = this.getPeriodClause(period)
|
|
230
|
+
|
|
231
|
+
// This bypasses RLS - caller must verify admin permissions
|
|
232
|
+
const result = await queryWithRLS<{
|
|
233
|
+
userId: string
|
|
234
|
+
totalTokens: string
|
|
235
|
+
totalCost: string
|
|
236
|
+
inputTokens: string
|
|
237
|
+
outputTokens: string
|
|
238
|
+
requestCount: string
|
|
239
|
+
}>(
|
|
240
|
+
`SELECT
|
|
241
|
+
"userId",
|
|
242
|
+
SUM("totalTokens")::text as "totalTokens",
|
|
243
|
+
SUM("totalCost")::text as "totalCost",
|
|
244
|
+
SUM("inputTokens")::text as "inputTokens",
|
|
245
|
+
SUM("outputTokens")::text as "outputTokens",
|
|
246
|
+
COUNT(*)::text as "requestCount"
|
|
247
|
+
FROM public."langchain_token_usage"
|
|
248
|
+
WHERE "teamId" = $1 ${periodClause}
|
|
249
|
+
GROUP BY "userId"`,
|
|
250
|
+
[teamId],
|
|
251
|
+
'admin' // Use admin context for team-wide queries
|
|
252
|
+
)
|
|
253
|
+
|
|
254
|
+
const byUser: Record<string, { tokens: number; cost: number }> = {}
|
|
255
|
+
let totalTokens = 0
|
|
256
|
+
let totalCost = 0
|
|
257
|
+
let inputTokens = 0
|
|
258
|
+
let outputTokens = 0
|
|
259
|
+
let requestCount = 0
|
|
260
|
+
|
|
261
|
+
for (const row of result) {
|
|
262
|
+
byUser[row.userId] = {
|
|
263
|
+
tokens: parseInt(row.totalTokens, 10),
|
|
264
|
+
cost: parseFloat(row.totalCost),
|
|
265
|
+
}
|
|
266
|
+
totalTokens += parseInt(row.totalTokens || '0', 10)
|
|
267
|
+
totalCost += parseFloat(row.totalCost || '0')
|
|
268
|
+
inputTokens += parseInt(row.inputTokens || '0', 10)
|
|
269
|
+
outputTokens += parseInt(row.outputTokens || '0', 10)
|
|
270
|
+
requestCount += parseInt(row.requestCount || '0', 10)
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
return {
|
|
274
|
+
totalTokens,
|
|
275
|
+
totalCost,
|
|
276
|
+
inputTokens,
|
|
277
|
+
outputTokens,
|
|
278
|
+
requestCount,
|
|
279
|
+
byModel: {}, // Not grouped by model for team view
|
|
280
|
+
byUser,
|
|
281
|
+
}
|
|
282
|
+
},
|
|
283
|
+
|
|
284
|
+
// Helper: Get SQL period clause
|
|
285
|
+
getPeriodClause(period: Period): string {
|
|
286
|
+
switch (period) {
|
|
287
|
+
case 'today':
|
|
288
|
+
return `AND "createdAt" >= CURRENT_DATE`
|
|
289
|
+
case '7d':
|
|
290
|
+
return `AND "createdAt" >= now() - interval '7 days'`
|
|
291
|
+
case '30d':
|
|
292
|
+
return `AND "createdAt" >= now() - interval '30 days'`
|
|
293
|
+
case 'all':
|
|
294
|
+
default:
|
|
295
|
+
return ''
|
|
296
|
+
}
|
|
297
|
+
},
|
|
298
|
+
}
|