@nextsparkjs/plugin-langchain 0.1.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/.env.example +41 -0
  2. package/api/observability/metrics/route.ts +110 -0
  3. package/api/observability/traces/[traceId]/route.ts +398 -0
  4. package/api/observability/traces/route.ts +205 -0
  5. package/api/sessions/route.ts +332 -0
  6. package/components/observability/CollapsibleJson.tsx +71 -0
  7. package/components/observability/CompactTimeline.tsx +75 -0
  8. package/components/observability/ConversationFlow.tsx +271 -0
  9. package/components/observability/DisabledMessage.tsx +21 -0
  10. package/components/observability/FiltersPanel.tsx +82 -0
  11. package/components/observability/ObservabilityDashboard.tsx +230 -0
  12. package/components/observability/SpansList.tsx +210 -0
  13. package/components/observability/TraceDetail.tsx +335 -0
  14. package/components/observability/TraceStatusBadge.tsx +39 -0
  15. package/components/observability/TracesTable.tsx +97 -0
  16. package/components/observability/index.ts +7 -0
  17. package/docs/01-getting-started/01-overview.md +196 -0
  18. package/docs/01-getting-started/02-installation.md +368 -0
  19. package/docs/01-getting-started/03-configuration.md +794 -0
  20. package/docs/02-core-concepts/01-architecture.md +566 -0
  21. package/docs/02-core-concepts/02-agents.md +597 -0
  22. package/docs/02-core-concepts/03-tools.md +689 -0
  23. package/docs/03-orchestration/01-graph-orchestrator.md +809 -0
  24. package/docs/03-orchestration/02-legacy-react.md +650 -0
  25. package/docs/04-advanced/01-observability.md +645 -0
  26. package/docs/04-advanced/02-token-tracking.md +469 -0
  27. package/docs/04-advanced/03-streaming.md +476 -0
  28. package/docs/04-advanced/04-guardrails.md +597 -0
  29. package/docs/05-reference/01-api-reference.md +1403 -0
  30. package/docs/05-reference/02-customization.md +646 -0
  31. package/docs/05-reference/03-examples.md +881 -0
  32. package/docs/index.md +85 -0
  33. package/hooks/observability/useMetrics.ts +31 -0
  34. package/hooks/observability/useTraceDetail.ts +48 -0
  35. package/hooks/observability/useTraces.ts +59 -0
  36. package/lib/agent-factory.ts +354 -0
  37. package/lib/agent-helpers.ts +201 -0
  38. package/lib/db-memory-store.ts +417 -0
  39. package/lib/graph/index.ts +58 -0
  40. package/lib/graph/nodes/combiner.ts +399 -0
  41. package/lib/graph/nodes/router.ts +440 -0
  42. package/lib/graph/orchestrator-graph.ts +386 -0
  43. package/lib/graph/prompts/combiner.md +131 -0
  44. package/lib/graph/prompts/router.md +193 -0
  45. package/lib/graph/types.ts +365 -0
  46. package/lib/guardrails.ts +230 -0
  47. package/lib/index.ts +44 -0
  48. package/lib/logger.ts +70 -0
  49. package/lib/memory-store.ts +168 -0
  50. package/lib/message-serializer.ts +110 -0
  51. package/lib/prompt-renderer.ts +94 -0
  52. package/lib/providers.ts +226 -0
  53. package/lib/streaming.ts +232 -0
  54. package/lib/token-tracker.ts +298 -0
  55. package/lib/tools-builder.ts +192 -0
  56. package/lib/tracer-callbacks.ts +342 -0
  57. package/lib/tracer.ts +350 -0
  58. package/migrations/001_langchain_memory.sql +83 -0
  59. package/migrations/002_token_usage.sql +127 -0
  60. package/migrations/003_observability.sql +257 -0
  61. package/package.json +28 -0
  62. package/plugin.config.ts +170 -0
  63. package/presets/lib/langchain.config.ts.preset +142 -0
  64. package/presets/templates/sector7/ai-observability/[traceId]/page.tsx +91 -0
  65. package/presets/templates/sector7/ai-observability/page.tsx +54 -0
  66. package/types/langchain.types.ts +274 -0
  67. package/types/observability.types.ts +270 -0
package/docs/index.md ADDED
@@ -0,0 +1,85 @@
+ # LangChain Plugin Documentation
+
+ Complete documentation for the LangChain Plugin - a comprehensive AI agent infrastructure for Next.js applications.
+
+ ---
+
+ ## Quick Links
+
+ | I want to... | Go to... |
+ |--------------|----------|
+ | Understand what this plugin does | [Overview](./01-getting-started/01-overview.md) |
+ | Install and configure the plugin | [Installation](./01-getting-started/02-installation.md) |
+ | Set up the graph orchestrator | [Graph Orchestrator](./03-orchestration/01-graph-orchestrator.md) |
+ | Monitor AI usage and costs | [Token Tracking](./04-advanced/02-token-tracking.md) |
+ | Debug agent execution | [Observability](./04-advanced/01-observability.md) |
+
+ ---
+
+ ## Documentation Structure
+
+ ### 01 - Getting Started
+
+ Essential guides for setting up and configuring the plugin.
+
+ | Document | Description |
+ |----------|-------------|
+ | [01 - Overview](./01-getting-started/01-overview.md) | Introduction and core concepts |
+ | [02 - Installation](./01-getting-started/02-installation.md) | Setup, migrations, and dependencies |
+ | [03 - Configuration](./01-getting-started/03-configuration.md) | Theme-level agent configuration |
+
+ ### 02 - Core Concepts
+
+ Deep dives into the fundamental building blocks.
+
+ | Document | Description |
+ |----------|-------------|
+ | [01 - Architecture](./02-core-concepts/01-architecture.md) | Technical architecture and patterns |
+ | [02 - Agents](./02-core-concepts/02-agents.md) | Creating and customizing agents |
+ | [03 - Tools](./02-core-concepts/03-tools.md) | Building tools for agents |
+
+ ### 03 - Orchestration
+
+ Multi-agent routing and coordination patterns.
+
+ | Document | Description |
+ |----------|-------------|
+ | [01 - Graph Orchestrator](./03-orchestration/01-graph-orchestrator.md) | **Recommended** - Modern state-machine orchestration |
+ | [02 - Legacy ReAct](./03-orchestration/02-legacy-react.md) | Deprecated ReAct-based approach |
+
+ ### 04 - Advanced Topics
+
+ Production-ready features for monitoring, security, and performance.
+
+ | Document | Description |
+ |----------|-------------|
+ | [01 - Observability](./04-advanced/01-observability.md) | Tracing, metrics, and debugging dashboard |
+ | [02 - Token Tracking](./04-advanced/02-token-tracking.md) | Token usage and cost monitoring |
+ | [03 - Streaming](./04-advanced/03-streaming.md) | Real-time SSE streaming responses |
+ | [04 - Guardrails](./04-advanced/04-guardrails.md) | Security: injection detection, PII masking |
+
+ ### 05 - Reference
+
+ Complete API documentation and examples.
+
+ | Document | Description |
+ |----------|-------------|
+ | [01 - API Reference](./05-reference/01-api-reference.md) | Complete API documentation |
+ | [02 - Customization](./05-reference/02-customization.md) | Advanced customization guide |
+ | [03 - Examples](./05-reference/03-examples.md) | Real-world implementation examples |
+
+ ---
+
+ ## Version History
+
+ | Version | Changes |
+ |---------|---------|
+ | v3.0 | Added Graph Orchestrator, Observability, Token Tracking, Streaming, Guardrails |
+ | v2.0 | Added multi-provider support, persistent memory |
+ | v1.0 | Initial release with ReAct-based orchestration |
+
+ ---
+
+ ## Related Documentation
+
+ - [Theme AI Documentation](../../themes/default/docs/03-ai/) - Theme-specific AI customization
package/hooks/observability/useMetrics.ts ADDED
@@ -0,0 +1,31 @@
+ 'use client'
+
+ import { useQuery } from '@tanstack/react-query'
+
+ interface MetricsResponse {
+   success: boolean
+   totalTraces: number
+   successTraces: number
+   errorTraces: number
+   avgLatency: number
+   totalTokens: number
+ }
+
+ async function fetchMetrics(period: string): Promise<MetricsResponse> {
+   const response = await fetch(`/api/v1/plugin/langchain/observability/metrics?period=${period}`)
+
+   if (!response.ok) {
+     throw new Error('Failed to fetch metrics')
+   }
+
+   return response.json()
+ }
+
+ export function useMetrics(period: string = '24h') {
+   return useQuery({
+     queryKey: ['observability', 'metrics', period],
+     queryFn: () => fetchMetrics(period),
+     refetchInterval: 30000, // Refresh every 30 seconds
+     staleTime: 20000,
+   })
+ }
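
Usage note: a minimal consumer of this hook might look like the sketch below. The import path, the `MetricsSummary` component, the `'7d'` period value, and the millisecond unit for `avgLatency` are illustrative assumptions, not part of the package.

```tsx
'use client'

// Hypothetical import path; adjust to however the plugin exposes its hooks.
import { useMetrics } from '@nextsparkjs/plugin-langchain/hooks/observability/useMetrics'

// Illustrative dashboard card: re-renders automatically as the hook
// refetches every 30 seconds (refetchInterval above).
export function MetricsSummary() {
  const { data, isLoading, error } = useMetrics('7d')

  if (isLoading) return <p>Loading metrics...</p>
  if (error || !data?.success) return <p>Failed to load metrics</p>

  // Derive a success rate from the counts in MetricsResponse
  const successRate = data.totalTraces > 0
    ? ((data.successTraces / data.totalTraces) * 100).toFixed(1) + '%'
    : 'n/a'

  return (
    <ul>
      <li>Total traces: {data.totalTraces}</li>
      <li>Success rate: {successRate}</li>
      <li>Avg latency: {data.avgLatency} ms (unit assumed)</li>
      <li>Total tokens: {data.totalTokens}</li>
    </ul>
  )
}
```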
package/hooks/observability/useTraceDetail.ts ADDED
@@ -0,0 +1,48 @@
+ 'use client'
+
+ import { useQuery } from '@tanstack/react-query'
+ import type { Trace, Span } from '../../types/observability.types'
+
+ interface ParentTraceInfo {
+   traceId: string
+   agentName: string
+ }
+
+ interface TraceDetailApiResponse {
+   success: boolean
+   data: {
+     trace: Trace
+     spans: Span[]
+     childTraces: Trace[]
+     childSpansMap: Record<string, Span[]>
+     parentTrace?: ParentTraceInfo
+   }
+ }
+
+ interface TraceDetailResponse {
+   trace: Trace
+   spans: Span[]
+   childTraces: Trace[]
+   childSpansMap: Record<string, Span[]>
+   parentTrace?: ParentTraceInfo
+ }
+
+ async function fetchTraceDetail(traceId: string): Promise<TraceDetailResponse> {
+   const response = await fetch(`/api/v1/plugin/langchain/observability/traces/${traceId}`)
+
+   if (!response.ok) {
+     throw new Error('Failed to fetch trace detail')
+   }
+
+   const json: TraceDetailApiResponse = await response.json()
+   return json.data
+ }
+
+ export function useTraceDetail(traceId: string | null) {
+   return useQuery({
+     queryKey: ['observability', 'trace', traceId],
+     queryFn: () => fetchTraceDetail(traceId!),
+     enabled: !!traceId,
+     staleTime: 10000,
+   })
+ }
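
Usage note: because the query sets `enabled: !!traceId`, callers can pass `null` until a trace is selected and no request fires. A sketch of that pattern follows; the component, the import path, and the `traceId` field on `Trace` are assumptions for illustration.

```tsx
'use client'

import { useState } from 'react'
// Hypothetical import path for illustration.
import { useTraceDetail } from '@nextsparkjs/plugin-langchain/hooks/observability/useTraceDetail'

// Illustrative detail panel: the query stays idle while selectedId is null.
export function TracePanel() {
  const [selectedId, setSelectedId] = useState<string | null>(null)
  const { data, isLoading } = useTraceDetail(selectedId)

  if (!selectedId) return <p>Select a trace to inspect it.</p>
  if (isLoading || !data) return <p>Loading trace...</p>

  const parent = data.parentTrace
  return (
    <div>
      <p>{data.spans.length} spans, {data.childTraces.length} child traces</p>
      {parent && (
        // Navigate up the parent/child trace hierarchy exposed by the API
        <button onClick={() => setSelectedId(parent.traceId)}>
          Up to {parent.agentName}
        </button>
      )}
    </div>
  )
}
```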
package/hooks/observability/useTraces.ts ADDED
@@ -0,0 +1,59 @@
+ 'use client'
+
+ import { useQuery } from '@tanstack/react-query'
+ import type { Trace } from '../../types/observability.types'
+
+ interface TracesFilters {
+   status?: string
+   agent?: string
+   teamId?: string
+   from?: string
+   to?: string
+   limit?: number
+   cursor?: string
+ }
+
+ interface TracesApiResponse {
+   success: boolean
+   data: {
+     traces: Trace[]
+     hasMore: boolean
+     nextCursor?: string
+   }
+ }
+
+ interface TracesResponse {
+   traces: Trace[]
+   hasMore: boolean
+   nextCursor?: string
+ }
+
+ async function fetchTraces(filters: TracesFilters): Promise<TracesResponse> {
+   const params = new URLSearchParams()
+
+   if (filters.status) params.append('status', filters.status)
+   if (filters.agent) params.append('agent', filters.agent)
+   if (filters.teamId) params.append('teamId', filters.teamId)
+   if (filters.from) params.append('from', filters.from)
+   if (filters.to) params.append('to', filters.to)
+   if (filters.limit) params.append('limit', filters.limit.toString())
+   if (filters.cursor) params.append('cursor', filters.cursor)
+
+   const response = await fetch(`/api/v1/plugin/langchain/observability/traces?${params}`)
+
+   if (!response.ok) {
+     throw new Error('Failed to fetch traces')
+   }
+
+   const json: TracesApiResponse = await response.json()
+   return json.data
+ }
+
+ export function useTraces(filters: TracesFilters = {}) {
+   return useQuery({
+     queryKey: ['observability', 'traces', filters],
+     queryFn: () => fetchTraces(filters),
+     refetchInterval: 5000, // Refresh every 5 seconds
+     staleTime: 3000,
+   })
+ }
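
Usage note: `nextCursor` feeds back into the `cursor` filter for pagination, and since the filters object is part of the query key, changing the cursor triggers a new fetch. A sketch follows; the component, the import path, and the `'error'` status value are illustrative assumptions.

```tsx
'use client'

import { useState } from 'react'
// Hypothetical import path for illustration.
import { useTraces } from '@nextsparkjs/plugin-langchain/hooks/observability/useTraces'

// Illustrative cursor-paginated list of failed traces.
export function ErrorTracesList() {
  const [cursor, setCursor] = useState<string | undefined>(undefined)
  const { data } = useTraces({ status: 'error', limit: 20, cursor })

  if (!data) return <p>Loading traces...</p>

  return (
    <div>
      <p>{data.traces.length} traces on this page</p>
      {data.hasMore && (
        // Advance to the next page by feeding nextCursor back into the filters
        <button onClick={() => setCursor(data.nextCursor)}>Next page</button>
      )}
    </div>
  )
}
```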
package/lib/agent-factory.ts ADDED
@@ -0,0 +1,354 @@
+ import { HumanMessage, AIMessage, SystemMessage, BaseMessage } from '@langchain/core/messages'
+ import { createReactAgent } from '@langchain/langgraph/prebuilt'
+ import { getModel } from './providers'
+ import { memoryStore } from './memory-store'
+ import { ToolDefinition, buildTools, convertToOpenAITools } from './tools-builder'
+ import { config } from '../plugin.config'
+ import { createAgentLogger } from './logger'
+ import { tokenTracker } from './token-tracker'
+ import { streamChat as streamChatFn, StreamChunk, StreamChatOptions } from './streaming'
+ import { guardrails, GuardrailsConfig } from './guardrails'
+ import { tracer } from './tracer'
+ import { createTracingCallbacks } from './tracer-callbacks'
+ import type { ModelConfig, AgentContext, SessionConfig } from '../types/langchain.types'
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
+
+ // Re-export types for convenience
+ export type { StreamChunk, StreamChatOptions }
+
+ interface CreateAgentOptions {
+   /** Unique session identifier for conversation memory */
+   sessionId: string
+   /** Human-readable agent name for tracing and logging */
+   agentName?: string
+   /** System prompt that defines the agent's behavior */
+   systemPrompt?: string
+   /** Tools available to the agent */
+   tools?: ToolDefinition<any>[]
+   /**
+    * Model configuration override.
+    * If not provided, uses plugin defaults from environment variables.
+    *
+    * @example
+    * // Use OpenAI GPT-4
+    * modelConfig: { provider: 'openai', model: 'gpt-4o' }
+    *
+    * // Use Anthropic Claude
+    * modelConfig: { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' }
+    *
+    * // Use local Ollama
+    * modelConfig: { provider: 'ollama', model: 'llama3.2:3b' }
+    *
+    * // Use with temperature
+    * modelConfig: { provider: 'openai', model: 'gpt-4o', temperature: 0.7 }
+    */
+   modelConfig?: Partial<ModelConfig>
+   /**
+    * Context for multi-tenant memory storage and prompt injection.
+    * Required for database-persisted conversation history.
+    * Can include additional data for Handlebars template rendering.
+    */
+   context?: AgentContext
+   /**
+    * Session configuration for memory customization.
+    * Allows per-agent TTL and message limits.
+    */
+   sessionConfig?: SessionConfig
+   /**
+    * Guardrails configuration for security and safety
+    */
+   guardrails?: GuardrailsConfig
+   /**
+    * Parent trace ID for nested agent calls (e.g., orchestrator -> sub-agent)
+    */
+   parentId?: string
+   /**
+    * Maximum number of iterations for the ReAct loop.
+    * Defaults to 50 here (LangGraph's built-in default is 25); increase for complex multi-step tasks.
+    * @default 50
+    */
+   recursionLimit?: number
+ }
+
+ export const createAgent = async (options: CreateAgentOptions) => {
+   const {
+     sessionId,
+     agentName,
+     systemPrompt = 'You are a helpful AI assistant.',
+     tools = [],
+     modelConfig,
+     context,
+     sessionConfig,
+     parentId,
+     recursionLimit = 50, // Default higher than LangGraph's 25 for multi-step tasks
+   } = options
+
+   // Warn in development if context is not provided (memory won't be persisted)
+   if (!context && config.debug) {
+     console.warn('[LangChain Agent] No context provided - conversation history will not be persisted to database')
+   }
+
+   // Get model based on configuration (or defaults from env vars)
+   const model = getModel(modelConfig)
+   const langChainTools = buildTools(tools)
+
+   // Determine effective model info for logging
+   const effectiveProvider = modelConfig?.provider || config.defaultProvider
+   const providerConfig = config.providers[effectiveProvider] as { model?: string; baseUrl?: string; temperature?: number }
+   const effectiveModel = modelConfig?.model || providerConfig?.model || 'default'
+   const effectiveBaseUrl = modelConfig?.options?.baseUrl || providerConfig?.baseUrl
+
+   // Create logger for this session (will be garbage collected when agent is done)
+   const logger = createAgentLogger({
+     agentName: agentName || 'single-agent',
+     userName: (context?.userName as string) || 'system',
+   })
+
+   // Log session initialization with model info
+   await logger.info('SESSION_INIT', {
+     provider: effectiveProvider,
+     model: effectiveModel,
+     baseUrl: effectiveBaseUrl || 'default',
+     temperature: modelConfig?.temperature ?? providerConfig?.temperature,
+     toolsCount: tools.length,
+   })
+
+   // For OpenAI-compatible providers (including LM Studio), bind tools with custom conversion
+   // to ensure proper type: "object" in JSON Schema
+   let boundModel: BaseChatModel = model
+   if (effectiveProvider === 'openai' && tools.length > 0) {
+     const openAITools = convertToOpenAITools(tools)
+     // Bind the model with pre-converted tools
+     // Use 'any' cast as LangChain's typing doesn't expose tools in BaseChatModelCallOptions
+     boundModel = (model as any).bind({ tools: openAITools }) as BaseChatModel
+   }
+
+   const agent = createReactAgent({
+     llm: boundModel,
+     tools: langChainTools,
+     messageModifier: systemPrompt,
+   })
+
+   // Log recursion limit for debugging
+   if (config.debug) {
+     console.log(`[LangChain Agent] ${agentName || 'Agent'} created with recursionLimit: ${recursionLimit}`)
+   }
+
+   return {
+     /**
+      * Send a message to the agent and get a response
+      */
+     chat: async (message: string) => {
+       // Start trace if configured
+       const traceContext = context
+         ? await tracer.startTrace(
+             { userId: context.userId, teamId: context.teamId },
+             agentName || 'Agent',
+             message,
+             { sessionId, parentId }
+           )
+         : null
+
+       // Create tracing callbacks if trace was started (pass model name for providers like Ollama)
+       const tracingHandler = traceContext && context
+         ? createTracingCallbacks({ userId: context.userId, teamId: context.teamId }, traceContext.traceId, effectiveModel)
+         : null
+       const tracingCallbacks = tracingHandler ? [tracingHandler] : []
+
+       try {
+         // Add user message to history
+         const userMessage = new HumanMessage(message)
+
+         // Get current history (empty array if no context provided)
+         const currentHistory = context
+           ? await memoryStore.getMessages(sessionId, context)
+           : []
+
+         // Invoke agent with history + new message
+         if (config.debug) {
+           console.log(`[Agent] Invoking with message: "${message}"`)
+         }
+
+         await logger.info('USER_MESSAGE', { message })
+
+         const result = await agent.invoke(
+           {
+             messages: [...currentHistory, userMessage],
+           },
+           {
+             callbacks: tracingCallbacks,
+             recursionLimit,
+           }
+         )
+
+         if (config.debug) {
+           console.log('[Agent] Result messages:', JSON.stringify(result.messages.map(m => ({ type: m._getType(), content: m.content })), null, 2))
+         }
+
+         const newMessages = result.messages.slice(currentHistory.length)
+         try {
+           await logger.info('AGENT_RESPONSE', {
+             messages: newMessages.map(m => ({
+               type: m._getType(),
+               content: m.content,
+               tool_calls: (m as any).tool_calls,
+               tool_call_id: (m as any).tool_call_id
+             }))
+           })
+         } catch (logError) {
+           if (config.debug) {
+             console.error('[Logger] Failed to log AGENT_RESPONSE:', logError)
+           }
+         }
+
+         // Result.messages contains the full conversation including the new response
+         // We need to extract the last message which is the AI response
+         const lastMessage = result.messages[result.messages.length - 1]
+         const responseContent = lastMessage.content as string
+
+         // Track token usage if available and context exists
+         const usage = (lastMessage as any).usage_metadata || (lastMessage as any).response_metadata?.usage
+         if (usage && context) {
+           await tokenTracker.trackUsage({
+             context,
+             sessionId,
+             provider: modelConfig?.provider || effectiveProvider,
+             model: modelConfig?.model || effectiveModel,
+             usage: {
+               inputTokens: usage.input_tokens || 0,
+               outputTokens: usage.output_tokens || 0,
+               totalTokens: usage.total_tokens || 0,
+             },
+             agentName,
+           })
+         }
+
+         // Update memory with the new interaction (User + AI)
+         // Note: LangGraph might return intermediate tool messages too
+         // Save new messages if context is provided
+         if (context) {
+           await memoryStore.addMessages(sessionId, newMessages, context, sessionConfig)
+         }
+
+         // End trace successfully if started
+         if (traceContext && context) {
+           // Flush pending operations and get call counts from tracing handler
+           await tracingHandler?.flush()
+           const counts = tracingHandler?.getCounts() || { llmCalls: 0, toolCalls: 0 }
+
+           await tracer.endTrace(
+             { userId: context.userId, teamId: context.teamId },
+             traceContext.traceId,
+             {
+               output: responseContent,
+               tokens: usage
+                 ? {
+                     input: usage.input_tokens || 0,
+                     output: usage.output_tokens || 0,
+                     total: usage.total_tokens || 0,
+                   }
+                 : undefined,
+               llmCalls: counts.llmCalls,
+               toolCalls: counts.toolCalls,
+             }
+           )
+         }
+
+         return {
+           content: responseContent,
+           sessionId,
+           messages: result.messages, // Expose full messages for orchestration
+           traceId: traceContext?.traceId, // Expose trace ID for parent-child linking
+         }
+       } catch (error) {
+         // End trace with error if started
+         if (traceContext && context) {
+           // Flush pending operations and get call counts from tracing handler (even on error)
+           await tracingHandler?.flush()
+           const counts = tracingHandler?.getCounts() || { llmCalls: 0, toolCalls: 0 }
+
+           await tracer.endTrace(
+             { userId: context.userId, teamId: context.teamId },
+             traceContext.traceId,
+             {
+               error: error instanceof Error ? error : new Error(String(error)),
+               llmCalls: counts.llmCalls,
+               toolCalls: counts.toolCalls,
+             }
+           )
+         }
+         throw error
+       }
+     },
+
+     /**
+      * Get chat history
+      */
+     getHistory: async () => {
+       if (!context) {
+         return []
+       }
+       return memoryStore.getMessages(sessionId, context)
+     },
+
+     /**
+      * Get the underlying agent for advanced use cases (e.g., streaming)
+      */
+     getAgent: () => agent,
+   }
+ }
+
+ /**
+  * Interface for agent configuration used by streamChat
+  */
+ interface AgentConfig {
+   name?: string
+   modelConfig?: Partial<ModelConfig>
+   guardrails?: GuardrailsConfig
+ }
+
+ /**
+  * Stream chat with an agent
+  *
+  * Uses LangChain's streamEvents() for token-by-token streaming.
+  * Applies guardrails if configured, handles memory persistence and token tracking.
+  */
+ export async function* streamChat(
+   input: string,
+   context: AgentContext,
+   config: AgentConfig,
+   options: StreamChatOptions = {}
+ ): AsyncGenerator<StreamChunk, void, unknown> {
+   // Create agent for streaming (we need access to the raw agent)
+   const agentInstance = await createAgent({
+     sessionId: options.sessionId || `${context.userId}-${Date.now()}`,
+     agentName: config.name,
+     modelConfig: config.modelConfig,
+     context,
+     sessionConfig: options.sessionConfig,
+   })
+
+   // Get the underlying agent
+   const agent = agentInstance.getAgent()
+
+   // Apply guardrails if configured
+   if (config.guardrails) {
+     try {
+       const { processed, warnings } = await guardrails.processInput(
+         input,
+         config.guardrails
+       )
+       input = processed
+       // Could yield warnings if needed
+     } catch (error) {
+       yield { type: 'error', error: error instanceof Error ? error.message : 'Guardrail blocked' }
+       return
+     }
+   }
+
+   // Delegate to streaming function
+   yield* streamChatFn(agent, input, context, config, {
+     ...options,
+     agentName: config.name,
+   })
+ }
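
Usage note: a minimal end-to-end sketch of `createAgent` and `streamChat` follows. The import path, the `AgentContext` object literal, and the chunk handling are illustrative assumptions; the only `StreamChunk` variant visible in this file is `{ type: 'error' }`, so other variants are treated as opaque here.

```ts
// Hypothetical import path; the package may also re-export these from lib/index.ts.
import { createAgent, streamChat } from '@nextsparkjs/plugin-langchain/lib/agent-factory'

async function run() {
  // Fields mirror what agent-factory reads from context (userId, teamId, userName).
  const context = { userId: 'user-1', teamId: 'team-1', userName: 'Ada' }

  // One-shot chat: history persists via memoryStore because context is provided.
  const agent = await createAgent({
    sessionId: 'demo-session',
    agentName: 'SupportBot',
    systemPrompt: 'You are a concise support assistant.',
    modelConfig: { provider: 'openai', model: 'gpt-4o' },
    context,
  })
  const reply = await agent.chat('How do I reset my password?')
  console.log(reply.content, reply.traceId)

  // Streaming: consume the async generator chunk by chunk.
  for await (const chunk of streamChat(
    'Summarize that answer in one sentence.',
    context,
    { name: 'SupportBot', modelConfig: { provider: 'openai', model: 'gpt-4o' } },
    { sessionId: 'demo-session' }
  )) {
    if (chunk.type === 'error') break
    // Other chunk variants (e.g. token deltas) are defined in lib/streaming.ts.
  }
}

run().catch(console.error)
```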