@nextsparkjs/plugin-langchain 0.1.0-beta.1

Files changed (67)
  1. package/.env.example +41 -0
  2. package/api/observability/metrics/route.ts +110 -0
  3. package/api/observability/traces/[traceId]/route.ts +398 -0
  4. package/api/observability/traces/route.ts +205 -0
  5. package/api/sessions/route.ts +332 -0
  6. package/components/observability/CollapsibleJson.tsx +71 -0
  7. package/components/observability/CompactTimeline.tsx +75 -0
  8. package/components/observability/ConversationFlow.tsx +271 -0
  9. package/components/observability/DisabledMessage.tsx +21 -0
  10. package/components/observability/FiltersPanel.tsx +82 -0
  11. package/components/observability/ObservabilityDashboard.tsx +230 -0
  12. package/components/observability/SpansList.tsx +210 -0
  13. package/components/observability/TraceDetail.tsx +335 -0
  14. package/components/observability/TraceStatusBadge.tsx +39 -0
  15. package/components/observability/TracesTable.tsx +97 -0
  16. package/components/observability/index.ts +7 -0
  17. package/docs/01-getting-started/01-overview.md +196 -0
  18. package/docs/01-getting-started/02-installation.md +368 -0
  19. package/docs/01-getting-started/03-configuration.md +794 -0
  20. package/docs/02-core-concepts/01-architecture.md +566 -0
  21. package/docs/02-core-concepts/02-agents.md +597 -0
  22. package/docs/02-core-concepts/03-tools.md +689 -0
  23. package/docs/03-orchestration/01-graph-orchestrator.md +809 -0
  24. package/docs/03-orchestration/02-legacy-react.md +650 -0
  25. package/docs/04-advanced/01-observability.md +645 -0
  26. package/docs/04-advanced/02-token-tracking.md +469 -0
  27. package/docs/04-advanced/03-streaming.md +476 -0
  28. package/docs/04-advanced/04-guardrails.md +597 -0
  29. package/docs/05-reference/01-api-reference.md +1403 -0
  30. package/docs/05-reference/02-customization.md +646 -0
  31. package/docs/05-reference/03-examples.md +881 -0
  32. package/docs/index.md +85 -0
  33. package/hooks/observability/useMetrics.ts +31 -0
  34. package/hooks/observability/useTraceDetail.ts +48 -0
  35. package/hooks/observability/useTraces.ts +59 -0
  36. package/lib/agent-factory.ts +354 -0
  37. package/lib/agent-helpers.ts +201 -0
  38. package/lib/db-memory-store.ts +417 -0
  39. package/lib/graph/index.ts +58 -0
  40. package/lib/graph/nodes/combiner.ts +399 -0
  41. package/lib/graph/nodes/router.ts +440 -0
  42. package/lib/graph/orchestrator-graph.ts +386 -0
  43. package/lib/graph/prompts/combiner.md +131 -0
  44. package/lib/graph/prompts/router.md +193 -0
  45. package/lib/graph/types.ts +365 -0
  46. package/lib/guardrails.ts +230 -0
  47. package/lib/index.ts +44 -0
  48. package/lib/logger.ts +70 -0
  49. package/lib/memory-store.ts +168 -0
  50. package/lib/message-serializer.ts +110 -0
  51. package/lib/prompt-renderer.ts +94 -0
  52. package/lib/providers.ts +226 -0
  53. package/lib/streaming.ts +232 -0
  54. package/lib/token-tracker.ts +298 -0
  55. package/lib/tools-builder.ts +192 -0
  56. package/lib/tracer-callbacks.ts +342 -0
  57. package/lib/tracer.ts +350 -0
  58. package/migrations/001_langchain_memory.sql +83 -0
  59. package/migrations/002_token_usage.sql +127 -0
  60. package/migrations/003_observability.sql +257 -0
  61. package/package.json +28 -0
  62. package/plugin.config.ts +170 -0
  63. package/presets/lib/langchain.config.ts.preset +142 -0
  64. package/presets/templates/sector7/ai-observability/[traceId]/page.tsx +91 -0
  65. package/presets/templates/sector7/ai-observability/page.tsx +54 -0
  66. package/types/langchain.types.ts +274 -0
  67. package/types/observability.types.ts +270 -0
package/docs/04-advanced/03-streaming.md
@@ -0,0 +1,476 @@
# Streaming Responses

This guide covers real-time token-by-token streaming via Server-Sent Events (SSE).

## Overview

Streaming enables:

- **Real-time display**: Show tokens as they're generated
- **Better UX**: User sees immediate feedback
- **Cancellation**: Abort long-running requests
- **Tool visibility**: See when tools are being called

---

## Why SSE over WebSocket?

| Aspect | SSE | WebSocket |
|--------|-----|-----------|
| **Complexity** | Simple | More complex |
| **Direction** | Server → Client | Bidirectional |
| **Reconnection** | Built-in | Manual |
| **HTTP/2** | Full support | Separate protocol |
| **Firewall** | Standard HTTP | May be blocked |

For LLM streaming (one-way, server to client), SSE is simpler and more reliable.

---

## Stream Chunk Types

```typescript
type StreamChunk =
  | { type: 'token'; content: string }
  | { type: 'done'; fullContent: string; tokenUsage?: TokenUsage }
  | { type: 'error'; error: string }
  | { type: 'tool_start'; toolName: string }
  | { type: 'tool_end'; toolName: string; result: unknown }
```

| Type | Description | Example |
|------|-------------|---------|
| `token` | A generated token | `{ type: 'token', content: 'Hello' }` |
| `done` | Stream complete | `{ type: 'done', fullContent: 'Hello world!' }` |
| `error` | Error occurred | `{ type: 'error', error: 'Timeout' }` |
| `tool_start` | Tool invocation started | `{ type: 'tool_start', toolName: 'list_tasks' }` |
| `tool_end` | Tool completed | `{ type: 'tool_end', toolName: 'list_tasks', result: [...] }` |
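
A consumer typically branches on `type`. Below is a minimal dispatch sketch over the union above; the `ui` callbacks are hypothetical placeholders for your own rendering logic, not part of the plugin API:

```typescript
// Sketch: exhaustive dispatch over StreamChunk (defined above).
// The `ui` callbacks are illustrative; wire them to your own state/rendering.
function handleChunk(
  chunk: StreamChunk,
  ui: {
    appendToken: (t: string) => void
    setToolActive: (name: string, active: boolean) => void
    finish: (fullContent: string) => void
    fail: (message: string) => void
  }
): void {
  switch (chunk.type) {
    case 'token':
      ui.appendToken(chunk.content)
      break
    case 'tool_start':
      ui.setToolActive(chunk.toolName, true)
      break
    case 'tool_end':
      ui.setToolActive(chunk.toolName, false)
      break
    case 'done':
      ui.finish(chunk.fullContent)
      break
    case 'error':
      ui.fail(chunk.error)
      break
  }
}
```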

---

## API Endpoint

### POST /api/.../ai/chat/stream

Streaming chat endpoint using SSE.

**Request**:
```json
{
  "message": "Show my tasks",
  "sessionId": "session-123",
  "agentName": "orchestrator"
}
```

**Response Headers**:
```
Content-Type: text/event-stream
Cache-Control: no-cache
Connection: keep-alive
```

**Response Stream**:
```
data: {"type":"token","content":"I"}

data: {"type":"token","content":" found"}

data: {"type":"tool_start","toolName":"list_tasks"}

data: {"type":"tool_end","toolName":"list_tasks","result":[...]}

data: {"type":"token","content":" 3"}

data: {"type":"token","content":" tasks"}

data: {"type":"done","fullContent":"I found 3 tasks...","tokenUsage":{"inputTokens":50,"outputTokens":30,"totalTokens":80}}

data: [DONE]
```
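
If you need to consume this stream without the React hook described below (for example from a test script), the wire format above can be parsed by hand. A minimal sketch, assuming one `data:` line per event as shown above; the endpoint path is a placeholder for your actual route:

```typescript
// Sketch: manually consuming the SSE stream with fetch + ReadableStream.
// '/api/ai/chat/stream' is a placeholder path; StreamChunk is the union above.
async function consumeStream(
  message: string,
  sessionId: string,
  onChunk: (chunk: StreamChunk) => void
): Promise<void> {
  const response = await fetch('/api/ai/chat/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message, sessionId, agentName: 'orchestrator' }),
  })
  if (!response.body) throw new Error('No response body')

  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    // SSE events are separated by a blank line
    const events = buffer.split('\n\n')
    buffer = events.pop() ?? '' // keep any partial event for the next read

    for (const event of events) {
      const data = event.replace(/^data: ?/, '').trim()
      if (!data || data === '[DONE]') continue
      onChunk(JSON.parse(data) as StreamChunk)
    }
  }
}
```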

---

## Backend Implementation

### streamChat Generator

```typescript
import { streamChat, StreamChunk } from '@/contents/plugins/langchain/lib/streaming'

// Agent and AgentContext are the plugin's agent types.
async function* handleStream(
  agent: Agent,
  message: string,
  context: AgentContext,
  signal: AbortSignal // passed in by the caller (e.g. an AbortController's signal)
): AsyncGenerator<StreamChunk> {
  yield* streamChat(
    agent,
    message,
    context,
    { modelConfig: { provider: 'openai', model: 'gpt-4o-mini' } },
    {
      sessionId: 'session-123',
      agentName: 'orchestrator',
      signal,
    }
  )
}
```

### SSE Response

```typescript
import { NextRequest } from 'next/server'
import { createSSEEncoder, streamChat } from '@/contents/plugins/langchain/lib/streaming'

export async function POST(request: NextRequest) {
  const { message, sessionId, agentName } = await request.json()

  // Create the streaming response
  const stream = new ReadableStream({
    async start(controller) {
      const encoder = createSSEEncoder()

      try {
        for await (const chunk of streamChat(agent, message, context, config, options)) {
          controller.enqueue(encoder.encode(chunk))

          if (chunk.type === 'done' || chunk.type === 'error') {
            controller.enqueue(encoder.encodeDone())
            break
          }
        }
      } finally {
        // Close even if streamChat throws mid-stream, so the client isn't left hanging
        controller.close()
      }
    },
  })

  return new Response(stream, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
    },
  })
}
```

---

## Frontend Integration

### useStreamingChat Hook

```typescript
import { useState } from 'react'
import { useStreamingChat } from '@/contents/plugins/langchain/hooks/useStreamingChat'

type Message = { role: 'user' | 'assistant'; content: string }

function ChatPanel() {
  const [messages, setMessages] = useState<Message[]>([])
  const [input, setInput] = useState('')

  const {
    isStreaming,
    streamContent,
    error,
    sendMessage,
    cancelStream,
  } = useStreamingChat({
    sessionId: 'session-123',
    agentName: 'orchestrator',
    onToken: (token) => {
      // Called for each token
      console.log('Token:', token)
    },
    onToolStart: (toolName) => {
      console.log('Tool started:', toolName)
    },
    onToolEnd: (toolName, result) => {
      console.log('Tool completed:', toolName, result)
    },
    onComplete: (fullContent, tokenUsage) => {
      setMessages(prev => [...prev, { role: 'assistant', content: fullContent }])
    },
    onError: (error) => {
      console.error('Stream error:', error)
    },
  })

  const handleSend = async () => {
    const text = input
    setMessages(prev => [...prev, { role: 'user', content: text }])
    setInput('') // clear the input before the stream starts
    await sendMessage(text)
  }

  return (
    <div>
      <MessageList messages={messages} />

      {isStreaming && (
        <div>
          <TypingIndicator content={streamContent} />
          <button onClick={cancelStream}>Stop</button>
        </div>
      )}

      <input
        value={input}
        onChange={(e) => setInput(e.target.value)}
        disabled={isStreaming}
      />
      <button onClick={handleSend} disabled={isStreaming}>
        Send
      </button>
    </div>
  )
}
```

### Hook Options

```typescript
interface UseStreamingChatOptions {
  sessionId?: string
  agentName?: string

  // Callbacks
  onToken?: (token: string) => void
  onToolStart?: (toolName: string) => void
  onToolEnd?: (toolName: string, result: unknown) => void
  onComplete?: (fullContent: string, tokenUsage?: TokenUsage) => void
  onError?: (error: string) => void
}

interface UseStreamingChatReturn {
  isStreaming: boolean
  streamContent: string // Accumulated content so far
  error: string | null
  sendMessage: (message: string) => Promise<void>
  cancelStream: () => void
}
```

---

## Cancellation

### Backend: AbortSignal

```typescript
const controller = new AbortController()

// Cancel after 30 seconds
setTimeout(() => controller.abort(), 30000)

yield* streamChat(agent, message, context, config, {
  signal: controller.signal,
})
```
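
Timeouts are one abort source; client disconnects are another. A hedged sketch of combining both into a single signal, assuming a Next.js route handler where the standard `request.signal` aborts when the HTTP client goes away (the helper name is illustrative):

```typescript
import { NextRequest } from 'next/server'

// Illustrative helper: one AbortSignal that fires on client disconnect OR timeout.
function makeStreamSignal(request: NextRequest, timeoutMs = 30_000): AbortSignal {
  const controller = new AbortController()
  const timer = setTimeout(
    () => controller.abort(new Error('Stream timed out')),
    timeoutMs
  )

  // request.signal is the fetch Request signal; it is assumed to abort
  // when the client disconnects.
  request.signal.addEventListener('abort', () => controller.abort(request.signal.reason))
  controller.signal.addEventListener('abort', () => clearTimeout(timer))
  return controller.signal
}

// Usage inside the route: pass it through to streamChat's options, e.g.
// yield* streamChat(agent, message, context, config, { signal: makeStreamSignal(request) })
```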

### Frontend: Cancel Button

```typescript
const { isStreaming, cancelStream } = useStreamingChat(options)

// ...in the component's JSX:
<button
  onClick={cancelStream}
  disabled={!isStreaming}
>
  Stop Generating
</button>
```

When cancelled, the stream emits:
```json
{ "type": "error", "error": "Stream cancelled by user" }
```

---

## Integration with Services

### Memory Persistence

After streaming completes, messages are automatically saved:

```typescript
// In streamChat()
if (sessionId && fullContent) {
  await dbMemoryStore.addMessages(
    sessionId,
    [new HumanMessage(input), new AIMessage(fullContent)],
    context,
    sessionConfig
  )
}
```

### Token Tracking

Token usage is tracked on completion:

```typescript
// In streamChat()
if (tokenUsage.totalTokens > 0) {
  await tokenTracker.trackUsage({
    context,
    sessionId,
    provider: config.modelConfig?.provider,
    model: config.modelConfig?.model,
    usage: tokenUsage,
    agentName,
  })
}
```

### Observability

Traces are automatically created:

```typescript
// In streamChat()
const traceContext = await tracer.startTrace(...)

// On completion
await tracer.endTrace(context, traceId, {
  output: fullContent,
  tokens: tokenUsage,
  llmCalls: counts.llmCalls,
  toolCalls: counts.toolCalls,
})
```

---

## UI Components

### Typing Indicator

```typescript
function TypingIndicator({ content }: { content: string }) {
  return (
    <div className="typing-indicator">
      <div className="message-content">
        {content}
        <span className="cursor">|</span>
      </div>
    </div>
  )
}
```

### Tool Call Display

```typescript
function ToolCallIndicator({ toolName, isActive }: { toolName: string; isActive: boolean }) {
  return (
    <div className={`tool-call ${isActive ? 'active' : 'completed'}`}>
      <ToolIcon />
      <span>{toolName}</span>
      {isActive && <Spinner />}
    </div>
  )
}
```

---

## Error Handling

### Network Errors

```typescript
const { error } = useStreamingChat({
  onError: (error) => {
    if (error.includes('network')) {
      // Retry or show a reconnect button
    } else if (error.includes('cancelled')) {
      // User cancelled; no action needed
    } else {
      // Show an error message
      toast.error(error)
    }
  },
})
```

### Timeout

Configure the server-side timeout:

```typescript
// In the API route
export const runtime = 'edge' // or 'nodejs'
export const maxDuration = 60 // seconds
```
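
Proxies and load balancers may also drop idle SSE connections before the model produces its first token. A common mitigation (a sketch, not part of the plugin API) is to emit SSE comment lines as a heartbeat while waiting:

```typescript
// Sketch: periodic SSE comment lines (": ping") keep intermediaries from
// closing an idle connection; comment lines are ignored by SSE parsers.
function withHeartbeat(
  controller: ReadableStreamDefaultController<Uint8Array>,
  intervalMs = 15_000
): () => void {
  const encoder = new TextEncoder()
  const timer = setInterval(() => {
    controller.enqueue(encoder.encode(': ping\n\n'))
  }, intervalMs)
  return () => clearInterval(timer) // call when the stream ends
}
```

Start it at the top of `start(controller)` and call the returned stop function before `controller.close()`.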

---

## Best Practices

### 1. Show Immediate Feedback

```typescript
// Show the user message immediately
setMessages(prev => [...prev, { role: 'user', content: input }])

// Then start streaming
await sendMessage(input)
```

### 2. Handle Edge Cases

```typescript
// Empty response
if (chunk.type === 'done' && !chunk.fullContent) {
  showMessage('No response generated')
}

// Very long response
if (streamContent.length > MAX_LENGTH) {
  cancelStream()
  showWarning('Response too long')
}
```

### 3. Provide a Cancel Option

Always show a cancel button during streaming so the user stays in control.

### 4. Buffer Tool Calls

```typescript
const [activeTools, setActiveTools] = useState<string[]>([])

onToolStart: (name) => {
  setActiveTools(prev => [...prev, name])
},
onToolEnd: (name) => {
  setActiveTools(prev => prev.filter(t => t !== name))
},
```

---

## Troubleshooting

### Stream Not Connecting

1. Check that the `Content-Type` header is `text/event-stream`
2. Verify that no caching headers interfere
3. Check CORS if the request is cross-origin

### Tokens Not Appearing

1. Verify that LangChain's `streamEvents` is working (see the sketch after this list)
2. Check that `on_chat_model_stream` events are emitted
3. Ensure the model supports streaming
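
For reference, token extraction from LangChain's event stream typically looks like the sketch below. This is a simplified illustration, not the plugin's exact implementation; it assumes `agent` is a LangChain runnable:

```typescript
// Simplified sketch of where streamed tokens come from. streamEvents is
// LangChain JS's Runnable API; on_chat_model_stream events carry partial messages.
for await (const event of agent.streamEvents({ input: message }, { version: 'v2' })) {
  if (event.event === 'on_chat_model_stream') {
    const token = event.data.chunk?.content
    if (typeof token === 'string' && token.length > 0) {
      console.log('token:', token) // a streamer would yield { type: 'token', content: token }
    }
  }
}
```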

### Memory Issues

1. Don't accumulate too much content in state
2. Clean up on component unmount (see the sketch after this list)
3. Use `cancelStream` when navigating away
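
A minimal cleanup sketch; the hook name is hypothetical, and `cancelStream` is the function returned by `useStreamingChat`:

```typescript
import { useEffect } from 'react'

// Hypothetical helper: abort any in-flight stream when the component unmounts,
// so the reader and accumulated state can be released.
function useCancelOnUnmount(cancelStream: () => void): void {
  useEffect(() => {
    // The cleanup runs on unmount (and if cancelStream's identity changes).
    return () => cancelStream()
  }, [cancelStream])
}
```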

---

## Related Documentation

- [Graph Orchestrator](../03-orchestration/01-graph-orchestrator.md) - How streaming integrates with orchestration
- [Token Tracking](./02-token-tracking.md) - Usage tracking during streams
- [Observability](./01-observability.md) - Tracing streaming requests