@nextsparkjs/plugin-langchain 0.1.0-beta.1 → 0.1.0-beta.101

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -1,41 +1,73 @@
1
- # ===========================================
2
- # LangChain Plugin Configuration
3
- # ===========================================
1
+ # ============================================
2
+ # LANGCHAIN PLUGIN CONFIGURATION
3
+ # ============================================
4
+ #
5
+ # Copy this file to .env and configure your LangChain settings
6
+ # This file is loaded automatically by the LangChain plugin
7
+ #
8
+ # Priority: Plugin .env > Root .env > Defaults
9
+ #
10
+ # ============================================
4
11
 
12
+ # ===========================
5
13
  # Plugin Settings
14
+ # ===========================
15
+
16
+ # Enable/disable the LangChain plugin
6
17
  LANGCHAIN_PLUGIN_ENABLED=true
18
+
19
+ # Enable debug logging for development
7
20
  LANGCHAIN_PLUGIN_DEBUG=false
8
21
 
9
- # File Logging (core environment variable)
10
- # Logs to logger/ai/ when enabled
22
+ # Enable file logging (logs to logger/ai/)
11
23
  LOG_ENABLED=false
12
24
 
13
- # ===========================================
25
+ # ===========================
14
26
  # Ollama Configuration (Local)
15
- # ===========================================
16
- # Used by: single-agent
27
+ # ===========================
17
28
  # No API key required - runs locally
29
+ # Install Ollama: https://ollama.ai/download
30
+ # Pull a model: ollama pull llama3.2:3b
18
31
 
19
32
  # Ollama server URL
20
33
  LANGCHAIN_OLLAMA_BASE_URL=http://localhost:11434
21
34
 
22
- # Model to use (run `ollama list` to see available models)
35
+ # Default model (run `ollama list` to see available models)
23
36
  LANGCHAIN_OLLAMA_MODEL=llama3.2:3b
24
37
 
25
- # ===========================================
26
- # OpenAI-compatible Configuration (LM Studio)
27
- # ===========================================
28
- # Used by: orchestrator, task-assistant, customer-assistant, page-assistant
29
- #
30
- # LM Studio provides an OpenAI-compatible API on localhost.
31
- # 1. Start LM Studio and load a model
32
- # 2. Start the local server (default: http://localhost:1234/v1)
38
+ # ===========================
39
+ # OpenAI Configuration
40
+ # ===========================
41
+ # Get your key from: https://platform.openai.com/api-keys
42
+
43
+ # OpenAI API Key
44
+ OPENAI_API_KEY=your-openai-key-here
45
+
46
+ # Default model for OpenAI provider
47
+ LANGCHAIN_OPENAI_MODEL=gpt-4o-mini
48
+
49
+ # OpenAI-compatible base URL (for LM Studio, local servers)
50
+ # LM Studio: http://localhost:1234/v1
51
+ # Leave empty for official OpenAI API
52
+ # LANGCHAIN_OPENAI_BASE_URL=http://localhost:1234/v1
53
+
54
+ # ===========================
55
+ # Anthropic Configuration
56
+ # ===========================
57
+ # Get your key from: https://console.anthropic.com/
58
+
59
+ # Anthropic API Key
60
+ ANTHROPIC_API_KEY=your-anthropic-key-here
61
+
62
+ # Default model for Anthropic provider
63
+ LANGCHAIN_ANTHROPIC_MODEL=claude-3-5-sonnet-20241022
33
64
 
34
- # LM Studio server URL
35
- LANGCHAIN_OPENAI_BASE_URL=http://localhost:1234/v1
65
+ # ===========================
66
+ # Feature Flags
67
+ # ===========================
36
68
 
37
- # Model name as loaded in LM Studio
38
- LANGCHAIN_OPENAI_MODEL=your-loaded-model-name
69
+ # Enable graph-based orchestrator (recommended for multi-agent)
70
+ LANGCHAIN_USE_GRAPH_ORCHESTRATOR=true
39
71
 
40
- # API Key (LM Studio doesn't require a real key, use any string)
41
- OPENAI_API_KEY=lm-studio
72
+ # Enable verbose debug output
73
+ LANGCHAIN_DEBUG=false
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 NextSpark
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,260 @@
1
+ # Observability API
2
+
3
+ Monitor AI agent traces, spans, and performance metrics for LangChain-powered applications.
4
+
5
+ ## Overview
6
+
7
+ The Observability API provides comprehensive monitoring for AI agents, including execution traces, detailed spans, and aggregated metrics. This API is designed for administrators to debug, analyze, and optimize AI agent performance.
8
+
9
+ **Access Level:** Admin only (superadmin or developer roles)
10
+
11
+ ## Authentication
12
+
13
+ All endpoints require authentication via:
14
+ - **Session cookie** (for browser-based requests)
15
+ - **API Key** header (for server-to-server requests)
16
+
17
+ **Role Requirements:**
18
+ - `superadmin` or `developer` role required
19
+
20
+ ## Endpoints
21
+
22
+ ### List Traces
23
+ `GET /api/langchain/observability/traces`
24
+
25
+ Returns a paginated list of root traces (agent executions) with cursor-based pagination.
26
+
27
+ **Query Parameters:**
28
+
29
+ | Parameter | Type | Default | Description |
30
+ |-----------|------|---------|-------------|
31
+ | status | string | - | Filter by status: `running`, `success`, `error` |
32
+ | agent | string | - | Filter by agent name |
33
+ | teamId | string | - | Filter by team ID |
34
+ | from | datetime | - | Start date filter (ISO 8601) |
35
+ | to | datetime | - | End date filter (ISO 8601) |
36
+ | limit | number | 50 | Results per page (max: 100) |
37
+ | cursor | string | - | Pagination cursor (ISO timestamp) |
38
+
39
+ **Example Response:**
40
+ ```json
41
+ {
42
+ "success": true,
43
+ "data": {
44
+ "traces": [
45
+ {
46
+ "traceId": "trace_abc123",
47
+ "userId": "user_456",
48
+ "teamId": "team_789",
49
+ "sessionId": "sess_xyz",
50
+ "agentName": "customer-support-agent",
51
+ "agentType": "conversational",
52
+ "status": "success",
53
+ "input": "How do I reset my password?",
54
+ "output": "To reset your password...",
55
+ "startedAt": "2024-01-15T10:30:00Z",
56
+ "endedAt": "2024-01-15T10:30:05Z",
57
+ "durationMs": 5234,
58
+ "inputTokens": 150,
59
+ "outputTokens": 320,
60
+ "totalTokens": 470,
61
+ "totalCost": 0.0047,
62
+ "llmCalls": 2,
63
+ "toolCalls": 1,
64
+ "metadata": {},
65
+ "tags": ["support", "password"],
66
+ "createdAt": "2024-01-15T10:30:00Z"
67
+ }
68
+ ],
69
+ "hasMore": true,
70
+ "nextCursor": "2024-01-15T10:29:00Z"
71
+ }
72
+ }
73
+ ```
74
+
75
+ ### Get Trace Detail
76
+ `GET /api/langchain/observability/traces/[traceId]`
77
+
78
+ Returns detailed information about a specific trace, including spans and child traces.
79
+
80
+ **Path Parameters:**
81
+ - `traceId` (string, required): The trace ID
82
+
83
+ **Example Response:**
84
+ ```json
85
+ {
86
+ "success": true,
87
+ "data": {
88
+ "trace": {
89
+ "traceId": "trace_abc123",
90
+ "userId": "user_456",
91
+ "teamId": "team_789",
92
+ "sessionId": "sess_xyz",
93
+ "agentName": "customer-support-agent",
94
+ "agentType": "conversational",
95
+ "status": "success",
96
+ "input": "How do I reset my password?",
97
+ "output": "To reset your password...",
98
+ "startedAt": "2024-01-15T10:30:00Z",
99
+ "endedAt": "2024-01-15T10:30:05Z",
100
+ "durationMs": 5234,
101
+ "inputTokens": 150,
102
+ "outputTokens": 320,
103
+ "totalTokens": 470,
104
+ "totalCost": 0.0047,
105
+ "llmCalls": 2,
106
+ "toolCalls": 1,
107
+ "metadata": {},
108
+ "tags": ["support", "password"],
109
+ "createdAt": "2024-01-15T10:30:00Z"
110
+ },
111
+ "spans": [
112
+ {
113
+ "spanId": "span_001",
114
+ "traceId": "trace_abc123",
115
+ "name": "ChatOpenAI",
116
+ "type": "llm",
117
+ "provider": "openai",
118
+ "model": "gpt-4",
119
+ "inputTokens": 100,
120
+ "outputTokens": 200,
121
+ "status": "success",
122
+ "startedAt": "2024-01-15T10:30:01Z",
123
+ "endedAt": "2024-01-15T10:30:03Z",
124
+ "durationMs": 2100,
125
+ "depth": 0,
126
+ "createdAt": "2024-01-15T10:30:01Z"
127
+ },
128
+ {
129
+ "spanId": "span_002",
130
+ "traceId": "trace_abc123",
131
+ "name": "search_knowledge_base",
132
+ "type": "tool",
133
+ "toolName": "search_knowledge_base",
134
+ "toolInput": {"query": "password reset"},
135
+ "toolOutput": {"results": ["..."]},
136
+ "status": "success",
137
+ "startedAt": "2024-01-15T10:30:02Z",
138
+ "endedAt": "2024-01-15T10:30:02Z",
139
+ "durationMs": 450,
140
+ "depth": 1,
141
+ "createdAt": "2024-01-15T10:30:02Z"
142
+ }
143
+ ],
144
+ "childTraces": [],
145
+ "childSpansMap": {},
146
+ "parentTrace": null
147
+ }
148
+ }
149
+ ```
150
+
151
+ ### Get Metrics
152
+ `GET /api/langchain/observability/metrics`
153
+
154
+ Returns aggregated metrics for a time period.
155
+
156
+ **Query Parameters:**
157
+
158
+ | Parameter | Type | Default | Description |
159
+ |-----------|------|---------|-------------|
160
+ | period | string | 24h | Time period: `1h`, `24h`, `7d`, `30d` |
161
+
162
+ **Example Response:**
163
+ ```json
164
+ {
165
+ "success": true,
166
+ "data": {
167
+ "period": "24h",
168
+ "totalTraces": 1250,
169
+ "successTraces": 1180,
170
+ "errorTraces": 70,
171
+ "avgLatency": 3456,
172
+ "totalTokens": 2450000
173
+ }
174
+ }
175
+ ```
176
+
177
+ ## Data Models
178
+
179
+ ### Trace
180
+
181
+ | Field | Type | Description |
182
+ |-------|------|-------------|
183
+ | traceId | string | Unique trace identifier |
184
+ | userId | string | User who initiated the trace |
185
+ | teamId | string | Team context |
186
+ | sessionId | string | Associated conversation session (optional) |
187
+ | agentName | string | Name of the AI agent |
188
+ | agentType | string | Type of agent (optional) |
189
+ | parentId | string | Parent trace ID for nested agents (optional) |
190
+ | input | string | Input to the agent |
191
+ | output | string | Output from the agent (optional) |
192
+ | status | enum | `running`, `success`, `error` |
193
+ | error | string | Error message if status is error (optional) |
194
+ | errorType | string | Error type classification (optional) |
195
+ | errorStack | string | Error stack trace (optional) |
196
+ | startedAt | datetime | When the trace started |
197
+ | endedAt | datetime | When the trace ended (optional) |
198
+ | durationMs | number | Duration in milliseconds (optional) |
199
+ | inputTokens | number | Input token count |
200
+ | outputTokens | number | Output token count |
201
+ | totalTokens | number | Total token count |
202
+ | totalCost | number | Estimated cost in USD |
203
+ | llmCalls | number | Number of LLM API calls |
204
+ | toolCalls | number | Number of tool invocations |
205
+ | metadata | object | Custom metadata |
206
+ | tags | string[] | Tags for categorization (optional) |
207
+ | createdAt | datetime | Record creation timestamp |
208
+
209
+ ### Span
210
+
211
+ | Field | Type | Description |
212
+ |-------|------|-------------|
213
+ | spanId | string | Unique span identifier |
214
+ | traceId | string | Parent trace ID |
215
+ | parentSpanId | string | Parent span ID for nesting (optional) |
216
+ | name | string | Span name |
217
+ | type | enum | `llm`, `tool`, `chain`, `retriever` |
218
+ | provider | string | LLM provider (e.g., openai, anthropic) (optional) |
219
+ | model | string | Model name (optional) |
220
+ | inputTokens | number | Input tokens for LLM spans (optional) |
221
+ | outputTokens | number | Output tokens for LLM spans (optional) |
222
+ | toolName | string | Tool name for tool spans (optional) |
223
+ | toolInput | any | Tool input data (optional) |
224
+ | toolOutput | any | Tool output data (optional) |
225
+ | input | any | Span input (optional) |
226
+ | output | any | Span output (optional) |
227
+ | status | enum | `running`, `success`, `error` |
228
+ | error | string | Error message (optional) |
229
+ | startedAt | datetime | When the span started |
230
+ | endedAt | datetime | When the span ended (optional) |
231
+ | durationMs | number | Duration in milliseconds (optional) |
232
+ | depth | number | Nesting depth level |
233
+ | createdAt | datetime | Record creation timestamp |
234
+
235
+ ### Metrics
236
+
237
+ | Field | Type | Description |
238
+ |-------|------|-------------|
239
+ | period | string | Time period queried |
240
+ | totalTraces | number | Total number of traces |
241
+ | successTraces | number | Traces with success status |
242
+ | errorTraces | number | Traces with error status |
243
+ | avgLatency | number | Average duration in milliseconds |
244
+ | totalTokens | number | Total tokens consumed |
245
+
246
+ ## Error Responses
247
+
248
+ | Status | Description |
249
+ |--------|-------------|
250
+ | 400 | Invalid period parameter |
251
+ | 401 | Unauthorized - Missing or invalid auth |
252
+ | 403 | Forbidden - Admin access required |
253
+ | 404 | Trace not found |
254
+ | 500 | Internal server error |
255
+
256
+ ## Related APIs
257
+
258
+ - **[Sessions](/api/langchain/sessions)** - Conversation session management
259
+ - **[Teams](/api/v1/teams)** - Team management
260
+ - **[Users](/api/v1/users)** - User management
@@ -7,6 +7,7 @@
7
7
 
8
8
  import { NextRequest, NextResponse } from 'next/server'
9
9
  import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
10
+ import { withRateLimitTier } from '@nextsparkjs/core/lib/api/rate-limit'
10
11
  import { queryWithRLS } from '@nextsparkjs/core/lib/db'
11
12
 
12
13
  interface MetricsRow {
@@ -24,7 +25,7 @@ const PERIOD_HOURS: Record<string, number> = {
24
25
  '30d': 24 * 30,
25
26
  }
26
27
 
27
- export async function GET(req: NextRequest) {
28
+ const getHandler = async (req: NextRequest) => {
28
29
  // 1. Authenticate (superadmin only)
29
30
  const authResult = await authenticateRequest(req)
30
31
  if (!authResult.success || !authResult.user) {
@@ -108,3 +109,5 @@ export async function GET(req: NextRequest) {
108
109
  )
109
110
  }
110
111
  }
112
+
113
+ export const GET = withRateLimitTier(getHandler, 'read')
@@ -0,0 +1,141 @@
1
+ /**
2
+ * API Presets for LangChain Observability
3
+ *
4
+ * Predefined API calls for traces and metrics monitoring.
5
+ * Admin access required (superadmin or developer roles).
6
+ */
7
+
8
+ import { defineApiEndpoint } from '@nextsparkjs/core/types/api-presets'
9
+
10
+ export default defineApiEndpoint({
11
+ summary: 'Monitor AI traces and performance metrics',
12
+ presets: [
13
+ {
14
+ id: 'list-traces',
15
+ title: 'List Recent Traces',
16
+ description: 'Get the most recent AI agent traces',
17
+ method: 'GET',
18
+ params: {
19
+ limit: 50
20
+ },
21
+ tags: ['read', 'list', 'traces']
22
+ },
23
+ {
24
+ id: 'list-running',
25
+ title: 'List Running Traces',
26
+ description: 'Get all currently running agent traces',
27
+ method: 'GET',
28
+ params: {
29
+ status: 'running',
30
+ limit: 50
31
+ },
32
+ tags: ['read', 'filter', 'traces']
33
+ },
34
+ {
35
+ id: 'list-success',
36
+ title: 'List Successful Traces',
37
+ description: 'Get completed successful traces',
38
+ method: 'GET',
39
+ params: {
40
+ status: 'success',
41
+ limit: 50
42
+ },
43
+ tags: ['read', 'filter', 'traces']
44
+ },
45
+ {
46
+ id: 'list-errors',
47
+ title: 'List Error Traces',
48
+ description: 'Get traces that ended with an error',
49
+ method: 'GET',
50
+ params: {
51
+ status: 'error',
52
+ limit: 50
53
+ },
54
+ tags: ['read', 'filter', 'traces']
55
+ },
56
+ {
57
+ id: 'list-by-agent',
58
+ title: 'List by Agent',
59
+ description: 'Filter traces by a specific agent name',
60
+ method: 'GET',
61
+ params: {
62
+ agent: '{{agentName}}',
63
+ limit: 50
64
+ },
65
+ tags: ['read', 'filter', 'traces']
66
+ },
67
+ {
68
+ id: 'list-by-team',
69
+ title: 'List by Team',
70
+ description: 'Filter traces for a specific team',
71
+ method: 'GET',
72
+ params: {
73
+ teamId: '{{teamId}}',
74
+ limit: 50
75
+ },
76
+ tags: ['read', 'filter', 'traces']
77
+ },
78
+ {
79
+ id: 'list-by-date-range',
80
+ title: 'List by Date Range',
81
+ description: 'Filter traces within a specific time period',
82
+ method: 'GET',
83
+ params: {
84
+ from: '{{fromDate}}',
85
+ to: '{{toDate}}',
86
+ limit: 50
87
+ },
88
+ tags: ['read', 'filter', 'traces']
89
+ },
90
+ {
91
+ id: 'get-trace-detail',
92
+ title: 'Get Trace Detail',
93
+ description: 'Get detailed information about a specific trace including spans',
94
+ method: 'GET',
95
+ pathParams: {
96
+ traceId: '{{traceId}}'
97
+ },
98
+ tags: ['read', 'detail', 'traces']
99
+ },
100
+ {
101
+ id: 'get-metrics-1h',
102
+ title: 'Get Metrics (1 Hour)',
103
+ description: 'Get aggregated metrics for the last hour',
104
+ method: 'GET',
105
+ params: {
106
+ period: '1h'
107
+ },
108
+ tags: ['read', 'metrics']
109
+ },
110
+ {
111
+ id: 'get-metrics-24h',
112
+ title: 'Get Metrics (24 Hours)',
113
+ description: 'Get aggregated metrics for the last 24 hours',
114
+ method: 'GET',
115
+ params: {
116
+ period: '24h'
117
+ },
118
+ tags: ['read', 'metrics']
119
+ },
120
+ {
121
+ id: 'get-metrics-7d',
122
+ title: 'Get Metrics (7 Days)',
123
+ description: 'Get aggregated metrics for the last 7 days',
124
+ method: 'GET',
125
+ params: {
126
+ period: '7d'
127
+ },
128
+ tags: ['read', 'metrics']
129
+ },
130
+ {
131
+ id: 'get-metrics-30d',
132
+ title: 'Get Metrics (30 Days)',
133
+ description: 'Get aggregated metrics for the last 30 days',
134
+ method: 'GET',
135
+ params: {
136
+ period: '30d'
137
+ },
138
+ tags: ['read', 'metrics']
139
+ }
140
+ ]
141
+ })
@@ -7,6 +7,7 @@
7
7
 
8
8
  import { NextRequest, NextResponse } from 'next/server'
9
9
  import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
10
+ import { withRateLimitTier } from '@nextsparkjs/core/lib/api/rate-limit'
10
11
  import { queryWithRLS } from '@nextsparkjs/core/lib/db'
11
12
  import type { Trace, Span } from '../../../../types/observability.types'
12
13
 
@@ -62,10 +63,10 @@ interface SpanRow {
62
63
  createdAt: Date
63
64
  }
64
65
 
65
- export async function GET(
66
+ const getHandler = async (
66
67
  req: NextRequest,
67
68
  { params }: { params: Promise<{ traceId: string }> }
68
- ) {
69
+ ) => {
69
70
  // 1. Authenticate (superadmin only)
70
71
  const authResult = await authenticateRequest(req)
71
72
  if (!authResult.success || !authResult.user) {
@@ -396,3 +397,5 @@ export async function GET(
396
397
  )
397
398
  }
398
399
  }
400
+
401
+ export const GET = withRateLimitTier(getHandler, 'read')
@@ -7,6 +7,7 @@
7
7
 
8
8
  import { NextRequest, NextResponse } from 'next/server'
9
9
  import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
10
+ import { withRateLimitTier } from '@nextsparkjs/core/lib/api/rate-limit'
10
11
  import { queryWithRLS } from '@nextsparkjs/core/lib/db'
11
12
  import type { Trace } from '../../../types/observability.types'
12
13
 
@@ -38,7 +39,7 @@ interface TraceRow {
38
39
  createdAt: Date
39
40
  }
40
41
 
41
- export async function GET(req: NextRequest) {
42
+ const getHandler = async (req: NextRequest) => {
42
43
  // 1. Authenticate (superadmin only)
43
44
  const authResult = await authenticateRequest(req)
44
45
  if (!authResult.success || !authResult.user) {
@@ -203,3 +204,5 @@ export async function GET(req: NextRequest) {
203
204
  )
204
205
  }
205
206
  }
207
+
208
+ export const GET = withRateLimitTier(getHandler, 'read')
@@ -0,0 +1,202 @@
1
+ # Sessions API
2
+
3
+ Manage AI conversation sessions with message history, pinning, and per-user limits.
4
+
5
+ ## Overview
6
+
7
+ The Sessions API allows you to create, read, update, and delete AI conversation sessions. Sessions store message history for LangChain-powered chatbots and AI assistants. Each user is limited to 50 conversations per team.
8
+
9
+ ## Authentication
10
+
11
+ All endpoints require authentication via:
12
+ - **Session cookie** (for browser-based requests)
13
+ - **API Key** header (for server-to-server requests)
14
+
15
+ **Required Header:**
16
+ - `x-team-id` (string): Team context for multi-tenancy
17
+
18
+ ## Endpoints
19
+
20
+ ### List Conversations
21
+ `GET /api/langchain/sessions`
22
+
23
+ Returns all conversations for the current user in the team context.
24
+
25
+ **Example Response:**
26
+ ```json
27
+ {
28
+ "success": true,
29
+ "data": {
30
+ "sessions": [
31
+ {
32
+ "sessionId": "sess_abc123",
33
+ "name": "Project Planning",
34
+ "messageCount": 24,
35
+ "firstMessage": "Help me plan my new project...",
36
+ "isPinned": true,
37
+ "createdAt": "2024-01-15T10:30:00Z",
38
+ "updatedAt": "2024-01-15T14:45:00Z"
39
+ }
40
+ ],
41
+ "count": 12,
42
+ "maxAllowed": 50
43
+ }
44
+ }
45
+ ```
46
+
47
+ ### Get Single Conversation
48
+ `GET /api/langchain/sessions?id=[sessionId]`
49
+
50
+ Returns a specific conversation by ID.
51
+
52
+ **Query Parameters:**
53
+ - `id` (string, required): Session ID
54
+
55
+ **Example Response:**
56
+ ```json
57
+ {
58
+ "success": true,
59
+ "data": {
60
+ "sessionId": "sess_abc123",
61
+ "name": "Project Planning",
62
+ "messageCount": 24,
63
+ "firstMessage": "Help me plan my new project...",
64
+ "isPinned": true,
65
+ "createdAt": "2024-01-15T10:30:00Z",
66
+ "updatedAt": "2024-01-15T14:45:00Z"
67
+ }
68
+ }
69
+ ```
70
+
71
+ ### Create Conversation
72
+ `POST /api/langchain/sessions`
73
+
74
+ Create a new conversation session. Limited to 50 conversations per user per team.
75
+
76
+ **Request Body:**
77
+ ```json
78
+ {
79
+ "name": "My New Conversation"
80
+ }
81
+ ```
82
+
83
+ **Response (201 Created):**
84
+ ```json
85
+ {
86
+ "success": true,
87
+ "data": {
88
+ "sessionId": "sess_xyz789",
89
+ "name": "My New Conversation",
90
+ "createdAt": "2024-01-15T16:00:00Z"
91
+ }
92
+ }
93
+ ```
94
+
95
+ **Limit Reached Response (400):**
96
+ ```json
97
+ {
98
+ "success": false,
99
+ "error": "CONVERSATION_LIMIT_REACHED",
100
+ "message": "Maximum of 50 conversations reached. Delete an existing conversation to create a new one.",
101
+ "data": {
102
+ "currentCount": 50,
103
+ "maxAllowed": 50,
104
+ "oldestSession": {
105
+ "sessionId": "sess_old123",
106
+ "name": "Old Conversation",
107
+ "updatedAt": "2024-01-01T10:00:00Z"
108
+ }
109
+ }
110
+ }
111
+ ```
112
+
113
+ ### Update Conversation
114
+ `PATCH /api/langchain/sessions`
115
+
116
+ Update a conversation's name or pinned status.
117
+
118
+ **Request Body:**
119
+ ```json
120
+ {
121
+ "sessionId": "sess_abc123",
122
+ "name": "Renamed Conversation",
123
+ "isPinned": true
124
+ }
125
+ ```
126
+
127
+ | Field | Type | Required | Description |
128
+ |-------|------|----------|-------------|
129
+ | sessionId | string | Yes | Session ID to update |
130
+ | name | string | No | New name for the conversation |
131
+ | isPinned | boolean | No | Pin/unpin the conversation |
132
+
133
+ **Example Response:**
134
+ ```json
135
+ {
136
+ "success": true,
137
+ "data": {
138
+ "sessionId": "sess_abc123",
139
+ "name": "Renamed Conversation",
140
+ "messageCount": 24,
141
+ "firstMessage": "Help me plan my new project...",
142
+ "isPinned": true,
143
+ "createdAt": "2024-01-15T10:30:00Z",
144
+ "updatedAt": "2024-01-15T16:30:00Z"
145
+ }
146
+ }
147
+ ```
148
+
149
+ ### Delete Conversation
150
+ `DELETE /api/langchain/sessions`
151
+
152
+ Delete a conversation and all its message history.
153
+
154
+ **Request Body:**
155
+ ```json
156
+ {
157
+ "sessionId": "sess_abc123"
158
+ }
159
+ ```
160
+
161
+ **Example Response:**
162
+ ```json
163
+ {
164
+ "success": true,
165
+ "message": "Conversation deleted successfully",
166
+ "sessionId": "sess_abc123"
167
+ }
168
+ ```
169
+
170
+ ## Fields
171
+
172
+ | Field | Type | Description |
173
+ |-------|------|-------------|
174
+ | sessionId | string | Unique session identifier |
175
+ | name | string | User-defined conversation name |
176
+ | messageCount | number | Number of messages in the conversation |
177
+ | firstMessage | string | Preview of the first message |
178
+ | isPinned | boolean | Whether the conversation is pinned |
179
+ | createdAt | datetime | When the conversation was created |
180
+ | updatedAt | datetime | When the conversation was last updated |
181
+
182
+ ## Limits
183
+
184
+ | Limit | Value | Description |
185
+ |-------|-------|-------------|
186
+ | MAX_CONVERSATIONS | 50 | Maximum conversations per user per team |
187
+
188
+ ## Error Responses
189
+
190
+ | Status | Code | Description |
191
+ |--------|------|-------------|
192
+ | 400 | TEAM_CONTEXT_REQUIRED | Missing x-team-id header |
193
+ | 400 | CONVERSATION_LIMIT_REACHED | User has reached 50 conversation limit |
194
+ | 400 | - | Session ID is required (for PATCH/DELETE) |
195
+ | 401 | - | Unauthorized - Missing or invalid authentication |
196
+ | 404 | - | Conversation not found |
197
+ | 500 | - | Internal server error |
198
+
199
+ ## Related APIs
200
+
201
+ - **[Observability](/api/langchain/observability)** - Monitor AI traces and metrics
202
+ - **[Teams](/api/v1/teams)** - Team management
@@ -0,0 +1,104 @@
1
+ /**
2
+ * API Presets for LangChain Sessions
3
+ *
4
+ * Predefined API calls for conversation management.
5
+ */
6
+
7
+ import { defineApiEndpoint } from '@nextsparkjs/core/types/api-presets'
8
+
9
+ export default defineApiEndpoint({
10
+ summary: 'Manage AI conversation sessions',
11
+ presets: [
12
+ {
13
+ id: 'list-all',
14
+ title: 'List All Conversations',
15
+ description: 'Get all conversations for the current user',
16
+ method: 'GET',
17
+ headers: {
18
+ 'x-team-id': '{{teamId}}'
19
+ },
20
+ tags: ['read', 'list']
21
+ },
22
+ {
23
+ id: 'get-by-id',
24
+ title: 'Get Conversation by ID',
25
+ description: 'Get a specific conversation by its session ID',
26
+ method: 'GET',
27
+ params: {
28
+ id: '{{sessionId}}'
29
+ },
30
+ headers: {
31
+ 'x-team-id': '{{teamId}}'
32
+ },
33
+ tags: ['read', 'detail']
34
+ },
35
+ {
36
+ id: 'create-new',
37
+ title: 'Create New Conversation',
38
+ description: 'Create a new AI conversation session',
39
+ method: 'POST',
40
+ headers: {
41
+ 'x-team-id': '{{teamId}}'
42
+ },
43
+ payload: {
44
+ name: 'New Conversation'
45
+ },
46
+ tags: ['write', 'create']
47
+ },
48
+ {
49
+ id: 'rename-conversation',
50
+ title: 'Rename Conversation',
51
+ description: 'Update the name of an existing conversation',
52
+ method: 'PATCH',
53
+ headers: {
54
+ 'x-team-id': '{{teamId}}'
55
+ },
56
+ payload: {
57
+ sessionId: '{{sessionId}}',
58
+ name: '{{newName}}'
59
+ },
60
+ tags: ['write', 'update']
61
+ },
62
+ {
63
+ id: 'pin-conversation',
64
+ title: 'Pin Conversation',
65
+ description: 'Pin a conversation to keep it at the top',
66
+ method: 'PATCH',
67
+ headers: {
68
+ 'x-team-id': '{{teamId}}'
69
+ },
70
+ payload: {
71
+ sessionId: '{{sessionId}}',
72
+ isPinned: true
73
+ },
74
+ tags: ['write', 'update']
75
+ },
76
+ {
77
+ id: 'unpin-conversation',
78
+ title: 'Unpin Conversation',
79
+ description: 'Unpin a conversation',
80
+ method: 'PATCH',
81
+ headers: {
82
+ 'x-team-id': '{{teamId}}'
83
+ },
84
+ payload: {
85
+ sessionId: '{{sessionId}}',
86
+ isPinned: false
87
+ },
88
+ tags: ['write', 'update']
89
+ },
90
+ {
91
+ id: 'delete-conversation',
92
+ title: 'Delete Conversation',
93
+ description: 'Delete a conversation and all its message history',
94
+ method: 'DELETE',
95
+ headers: {
96
+ 'x-team-id': '{{teamId}}'
97
+ },
98
+ payload: {
99
+ sessionId: '{{sessionId}}'
100
+ },
101
+ tags: ['write', 'delete']
102
+ }
103
+ ]
104
+ })
@@ -1,5 +1,6 @@
1
1
  import { NextRequest, NextResponse } from 'next/server'
2
2
  import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
3
+ import { withRateLimitTier } from '@nextsparkjs/core/lib/api/rate-limit'
3
4
  import { dbMemoryStore, CONVERSATION_LIMITS } from '../../lib/db-memory-store'
4
5
  import { config } from '../../plugin.config'
5
6
  import type {
@@ -36,7 +37,7 @@ function toApiConversationInfo(conv: {
36
37
  * Without id: returns list of all conversations
37
38
  * With id: returns single conversation details
38
39
  */
39
- export async function GET(req: NextRequest) {
40
+ const getHandler = async (req: NextRequest) => {
40
41
  // 1. Auth
41
42
  const authResult = await authenticateRequest(req)
42
43
  if (!authResult.success || !authResult.user) {
@@ -103,13 +104,15 @@ export async function GET(req: NextRequest) {
103
104
  }
104
105
  }
105
106
 
107
+ export const GET = withRateLimitTier(getHandler, 'read')
108
+
106
109
  /**
107
110
  * POST - Create a new conversation
108
111
  *
109
112
  * Body:
110
113
  * - name: Optional name for the conversation
111
114
  */
112
- export async function POST(req: NextRequest) {
115
+ const postHandler = async (req: NextRequest) => {
113
116
  // 1. Auth
114
117
  const authResult = await authenticateRequest(req)
115
118
  if (!authResult.success || !authResult.user) {
@@ -183,6 +186,8 @@ export async function POST(req: NextRequest) {
183
186
  }
184
187
  }
185
188
 
189
+ export const POST = withRateLimitTier(postHandler, 'write')
190
+
186
191
  /**
187
192
  * PATCH - Update a conversation (rename, pin/unpin)
188
193
  *
@@ -191,7 +196,7 @@ export async function POST(req: NextRequest) {
191
196
  * - name: New name (optional)
192
197
  * - isPinned: New pin status (optional)
193
198
  */
194
- export async function PATCH(req: NextRequest) {
199
+ const patchHandler = async (req: NextRequest) => {
195
200
  // 1. Auth
196
201
  const authResult = await authenticateRequest(req)
197
202
  if (!authResult.success || !authResult.user) {
@@ -262,13 +267,15 @@ export async function PATCH(req: NextRequest) {
262
267
  }
263
268
  }
264
269
 
270
+ export const PATCH = withRateLimitTier(patchHandler, 'write')
271
+
265
272
  /**
266
273
  * DELETE - Delete a conversation
267
274
  *
268
275
  * Body:
269
276
  * - sessionId: Session ID to delete (required)
270
277
  */
271
- export async function DELETE(req: NextRequest) {
278
+ const deleteHandler = async (req: NextRequest) => {
272
279
  // 1. Auth
273
280
  const authResult = await authenticateRequest(req)
274
281
  if (!authResult.success || !authResult.user) {
@@ -330,3 +337,5 @@ export async function DELETE(req: NextRequest) {
330
337
  )
331
338
  }
332
339
  }
340
+
341
+ export const DELETE = withRateLimitTier(deleteHandler, 'write')
@@ -687,6 +687,53 @@ Configure how structured output works for multi-provider compatibility:
687
687
 
688
688
  ## Environment Variables
689
689
 
690
+ ### ⭐ Plugin-Level Environment Configuration (Recommended)
691
+
692
+ The LangChain plugin supports **plugin-level `.env` files** that take priority over root environment variables. This provides isolation and modularity for your AI configuration.
693
+
694
+ #### Setup
695
+
696
+ 1. **Copy the example file:**
697
+ ```bash
698
+ cp contents/plugins/langchain/.env.example contents/plugins/langchain/.env
699
+ ```
700
+
701
+ 2. **Configure your settings:**
702
+ ```env
703
+ # Plugin Settings
704
+ LANGCHAIN_PLUGIN_ENABLED=true
705
+ LANGCHAIN_PLUGIN_DEBUG=false
706
+
707
+ # Ollama Configuration (Local)
708
+ LANGCHAIN_OLLAMA_BASE_URL=http://localhost:11434
709
+ LANGCHAIN_OLLAMA_MODEL=llama3.2:3b
710
+
711
+ # OpenAI Configuration (LM Studio or OpenAI API)
712
+ LANGCHAIN_OPENAI_BASE_URL=http://localhost:1234/v1
713
+ LANGCHAIN_OPENAI_MODEL=gpt-4o-mini
714
+ OPENAI_API_KEY=your-api-key
715
+ ```
716
+
717
+ #### Priority System
718
+
719
+ The plugin environment loader uses this priority:
720
+
721
+ 1. **Plugin `.env`** (`contents/plugins/langchain/.env`) - Highest priority
722
+ 2. **Root `.env`** (`/.env`) - Fallback for variables not in plugin .env
723
+ 3. **Built-in defaults** - Lowest priority
724
+
725
+ This means you can:
726
+ - Keep API keys in the root `.env` (shared across plugins)
727
+ - Override specific settings in the plugin `.env` (isolated configuration)
728
+
729
+ #### Benefits
730
+
731
+ - ✅ **Isolation**: LangChain config doesn't pollute root `.env`
732
+ - ✅ **Modularity**: Each plugin manages its own environment
733
+ - ✅ **Security**: API keys can be scoped to specific plugins
734
+ - ✅ **Flexibility**: Different configurations for different plugins
735
+ - ✅ **Fallback**: Automatically uses root `.env` when plugin `.env` doesn't define a variable
736
+
690
737
  ### Provider Configuration
691
738
 
692
739
  ```env
@@ -0,0 +1,180 @@
1
+ /**
2
+ * LangChain Plugin Environment Configuration (Server-Only)
3
+ *
4
+ * Uses centralized plugin environment loader from core
5
+ * Provides type-safe access to LangChain configuration
6
+ */
7
+
8
+ import { getPluginEnv } from '@nextsparkjs/core/lib/plugins/env-loader'
9
+
10
+ interface LangChainPluginEnvConfig {
11
+ // Plugin settings
12
+ LANGCHAIN_PLUGIN_ENABLED?: string
13
+ LANGCHAIN_PLUGIN_DEBUG?: string
14
+ LOG_ENABLED?: string
15
+
16
+ // Ollama Configuration
17
+ LANGCHAIN_OLLAMA_BASE_URL?: string
18
+ LANGCHAIN_OLLAMA_MODEL?: string
19
+
20
+ // OpenAI Configuration
21
+ LANGCHAIN_OPENAI_BASE_URL?: string
22
+ LANGCHAIN_OPENAI_MODEL?: string
23
+ OPENAI_API_KEY?: string
24
+
25
+ // Anthropic Configuration
26
+ LANGCHAIN_ANTHROPIC_MODEL?: string
27
+ ANTHROPIC_API_KEY?: string
28
+
29
+ // Feature Flags
30
+ LANGCHAIN_USE_GRAPH_ORCHESTRATOR?: string
31
+ LANGCHAIN_DEBUG?: string
32
+ }
33
+
34
+ class PluginEnvironment {
35
+ private static instance: PluginEnvironment
36
+ private config: LangChainPluginEnvConfig = {}
37
+ private loaded = false
38
+
39
+ private constructor() {
40
+ this.loadEnvironment()
41
+ }
42
+
43
+ public static getInstance(): PluginEnvironment {
44
+ if (!PluginEnvironment.instance) {
45
+ PluginEnvironment.instance = new PluginEnvironment()
46
+ }
47
+ return PluginEnvironment.instance
48
+ }
49
+
50
+ private loadEnvironment(forceReload: boolean = false): void {
51
+ if (this.loaded && !forceReload) return
52
+
53
+ try {
54
+ // Use centralized plugin env loader
55
+ const env = getPluginEnv('langchain')
56
+
57
+ this.config = {
58
+ // Plugin settings
59
+ LANGCHAIN_PLUGIN_ENABLED: env.LANGCHAIN_PLUGIN_ENABLED || 'true',
60
+ LANGCHAIN_PLUGIN_DEBUG: env.LANGCHAIN_PLUGIN_DEBUG || 'false',
61
+ LOG_ENABLED: env.LOG_ENABLED || 'false',
62
+
63
+ // Ollama Configuration
64
+ LANGCHAIN_OLLAMA_BASE_URL: env.LANGCHAIN_OLLAMA_BASE_URL || 'http://localhost:11434',
65
+ LANGCHAIN_OLLAMA_MODEL: env.LANGCHAIN_OLLAMA_MODEL || 'llama3.2:3b',
66
+
67
+ // OpenAI Configuration
68
+ LANGCHAIN_OPENAI_BASE_URL: env.LANGCHAIN_OPENAI_BASE_URL,
69
+ LANGCHAIN_OPENAI_MODEL: env.LANGCHAIN_OPENAI_MODEL || 'gpt-4o-mini',
70
+ OPENAI_API_KEY: env.OPENAI_API_KEY,
71
+
72
+ // Anthropic Configuration
73
+ LANGCHAIN_ANTHROPIC_MODEL: env.LANGCHAIN_ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022',
74
+ ANTHROPIC_API_KEY: env.ANTHROPIC_API_KEY,
75
+
76
+ // Feature Flags
77
+ LANGCHAIN_USE_GRAPH_ORCHESTRATOR: env.LANGCHAIN_USE_GRAPH_ORCHESTRATOR || 'true',
78
+ LANGCHAIN_DEBUG: env.LANGCHAIN_DEBUG || 'false',
79
+ }
80
+
81
+ this.logLoadedConfiguration()
82
+ this.loaded = true
83
+ } catch (error) {
84
+ console.error('[LangChain Plugin] Failed to load environment:', error)
85
+ this.loaded = true
86
+ }
87
+ }
88
+
89
+ private logLoadedConfiguration(): void {
90
+ if (process.env.NODE_ENV === 'development' && this.config.LANGCHAIN_PLUGIN_DEBUG === 'true') {
91
+ console.log('[LangChain Plugin] Environment Configuration:')
92
+ console.log(' → Plugin Settings:')
93
+ console.log(` - LANGCHAIN_PLUGIN_ENABLED: ${this.config.LANGCHAIN_PLUGIN_ENABLED}`)
94
+ console.log(` - LANGCHAIN_PLUGIN_DEBUG: ${this.config.LANGCHAIN_PLUGIN_DEBUG}`)
95
+ console.log(' → Ollama Configuration:')
96
+ console.log(` - LANGCHAIN_OLLAMA_BASE_URL: ${this.config.LANGCHAIN_OLLAMA_BASE_URL}`)
97
+ console.log(` - LANGCHAIN_OLLAMA_MODEL: ${this.config.LANGCHAIN_OLLAMA_MODEL}`)
98
+ console.log(' → OpenAI Configuration:')
99
+ console.log(` - LANGCHAIN_OPENAI_BASE_URL: ${this.config.LANGCHAIN_OPENAI_BASE_URL || 'not set'}`)
100
+ console.log(` - LANGCHAIN_OPENAI_MODEL: ${this.config.LANGCHAIN_OPENAI_MODEL}`)
101
+ console.log(` - OPENAI_API_KEY: ${this.config.OPENAI_API_KEY ? '✓ set' : '✗ not set'}`)
102
+ console.log(' → Anthropic Configuration:')
103
+ console.log(` - LANGCHAIN_ANTHROPIC_MODEL: ${this.config.LANGCHAIN_ANTHROPIC_MODEL}`)
104
+ console.log(` - ANTHROPIC_API_KEY: ${this.config.ANTHROPIC_API_KEY ? '✓ set' : '✗ not set'}`)
105
+ console.log()
106
+ }
107
+ }
108
+
109
+ public getConfig(): LangChainPluginEnvConfig {
110
+ if (!this.loaded) {
111
+ this.loadEnvironment()
112
+ }
113
+ return this.config
114
+ }
115
+
116
+ // Helper methods
117
+ public isPluginEnabled(): boolean {
118
+ return this.getConfig().LANGCHAIN_PLUGIN_ENABLED !== 'false'
119
+ }
120
+
121
+ public isDebugEnabled(): boolean {
122
+ return this.getConfig().LANGCHAIN_PLUGIN_DEBUG === 'true' || this.getConfig().LANGCHAIN_DEBUG === 'true'
123
+ }
124
+
125
+ public isLogEnabled(): boolean {
126
+ return this.getConfig().LOG_ENABLED === 'true'
127
+ }
128
+
129
+ public getOllamaBaseUrl(): string {
130
+ return this.getConfig().LANGCHAIN_OLLAMA_BASE_URL || 'http://localhost:11434'
131
+ }
132
+
133
+ public getOllamaModel(): string {
134
+ return this.getConfig().LANGCHAIN_OLLAMA_MODEL || 'llama3.2:3b'
135
+ }
136
+
137
+ public getOpenAIBaseUrl(): string | undefined {
138
+ return this.getConfig().LANGCHAIN_OPENAI_BASE_URL
139
+ }
140
+
141
+ public getOpenAIModel(): string {
142
+ return this.getConfig().LANGCHAIN_OPENAI_MODEL || 'gpt-4o-mini'
143
+ }
144
+
145
+ public getOpenAIApiKey(): string | undefined {
146
+ return this.getConfig().OPENAI_API_KEY
147
+ }
148
+
149
+ public getAnthropicModel(): string {
150
+ return this.getConfig().LANGCHAIN_ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022'
151
+ }
152
+
153
+ public getAnthropicApiKey(): string | undefined {
154
+ return this.getConfig().ANTHROPIC_API_KEY
155
+ }
156
+
157
+ public isGraphOrchestratorEnabled(): boolean {
158
+ return this.getConfig().LANGCHAIN_USE_GRAPH_ORCHESTRATOR === 'true'
159
+ }
160
+
161
+ public reload(): void {
162
+ this.loaded = false
163
+ this.loadEnvironment(true)
164
+ }
165
+ }
166
+
167
+ export const pluginEnv = PluginEnvironment.getInstance()
168
+
169
+ // Convenience exports
170
+ export const isPluginEnabled = () => pluginEnv.isPluginEnabled()
171
+ export const isDebugEnabled = () => pluginEnv.isDebugEnabled()
172
+ export const isLogEnabled = () => pluginEnv.isLogEnabled()
173
+ export const getOllamaBaseUrl = () => pluginEnv.getOllamaBaseUrl()
174
+ export const getOllamaModel = () => pluginEnv.getOllamaModel()
175
+ export const getOpenAIBaseUrl = () => pluginEnv.getOpenAIBaseUrl()
176
+ export const getOpenAIModel = () => pluginEnv.getOpenAIModel()
177
+ export const getOpenAIApiKey = () => pluginEnv.getOpenAIApiKey()
178
+ export const getAnthropicModel = () => pluginEnv.getAnthropicModel()
179
+ export const getAnthropicApiKey = () => pluginEnv.getAnthropicApiKey()
180
+ export const isGraphOrchestratorEnabled = () => pluginEnv.isGraphOrchestratorEnabled()
package/package.json CHANGED
@@ -1,11 +1,11 @@
1
1
  {
2
2
  "name": "@nextsparkjs/plugin-langchain",
3
- "version": "0.1.0-beta.1",
3
+ "version": "0.1.0-beta.101",
4
4
  "private": false,
5
5
  "main": "./plugin.config.ts",
6
6
  "requiredPlugins": [],
7
7
  "dependencies": {
8
- "@anthropic-ai/sdk": "^0.36.0",
8
+ "@anthropic-ai/sdk": "^0.71.0",
9
9
  "@langchain/anthropic": "^0.3.0",
10
10
  "@langchain/community": "^0.3.0",
11
11
  "@langchain/core": "^0.3.0",
@@ -16,7 +16,7 @@
16
16
  "langchain": "^0.3.0"
17
17
  },
18
18
  "peerDependencies": {
19
- "@nextsparkjs/core": "workspace:*",
19
+ "@nextsparkjs/core": "*",
20
20
  "@tanstack/react-query": "^5.0.0",
21
21
  "lucide-react": "^0.539.0",
22
22
  "next": "^15.0.0",
@@ -24,5 +24,9 @@
24
24
  "react": "^19.0.0",
25
25
  "react-dom": "^19.0.0",
26
26
  "zod": "^4.0.0"
27
+ },
28
+ "nextspark": {
29
+ "type": "plugin",
30
+ "name": "langchain"
27
31
  }
28
- }
32
+ }