@nextsparkjs/plugin-langchain 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +41 -0
- package/api/observability/metrics/route.ts +110 -0
- package/api/observability/traces/[traceId]/route.ts +398 -0
- package/api/observability/traces/route.ts +205 -0
- package/api/sessions/route.ts +332 -0
- package/components/observability/CollapsibleJson.tsx +71 -0
- package/components/observability/CompactTimeline.tsx +75 -0
- package/components/observability/ConversationFlow.tsx +271 -0
- package/components/observability/DisabledMessage.tsx +21 -0
- package/components/observability/FiltersPanel.tsx +82 -0
- package/components/observability/ObservabilityDashboard.tsx +230 -0
- package/components/observability/SpansList.tsx +210 -0
- package/components/observability/TraceDetail.tsx +335 -0
- package/components/observability/TraceStatusBadge.tsx +39 -0
- package/components/observability/TracesTable.tsx +97 -0
- package/components/observability/index.ts +7 -0
- package/docs/01-getting-started/01-overview.md +196 -0
- package/docs/01-getting-started/02-installation.md +368 -0
- package/docs/01-getting-started/03-configuration.md +794 -0
- package/docs/02-core-concepts/01-architecture.md +566 -0
- package/docs/02-core-concepts/02-agents.md +597 -0
- package/docs/02-core-concepts/03-tools.md +689 -0
- package/docs/03-orchestration/01-graph-orchestrator.md +809 -0
- package/docs/03-orchestration/02-legacy-react.md +650 -0
- package/docs/04-advanced/01-observability.md +645 -0
- package/docs/04-advanced/02-token-tracking.md +469 -0
- package/docs/04-advanced/03-streaming.md +476 -0
- package/docs/04-advanced/04-guardrails.md +597 -0
- package/docs/05-reference/01-api-reference.md +1403 -0
- package/docs/05-reference/02-customization.md +646 -0
- package/docs/05-reference/03-examples.md +881 -0
- package/docs/index.md +85 -0
- package/hooks/observability/useMetrics.ts +31 -0
- package/hooks/observability/useTraceDetail.ts +48 -0
- package/hooks/observability/useTraces.ts +59 -0
- package/lib/agent-factory.ts +354 -0
- package/lib/agent-helpers.ts +201 -0
- package/lib/db-memory-store.ts +417 -0
- package/lib/graph/index.ts +58 -0
- package/lib/graph/nodes/combiner.ts +399 -0
- package/lib/graph/nodes/router.ts +440 -0
- package/lib/graph/orchestrator-graph.ts +386 -0
- package/lib/graph/prompts/combiner.md +131 -0
- package/lib/graph/prompts/router.md +193 -0
- package/lib/graph/types.ts +365 -0
- package/lib/guardrails.ts +230 -0
- package/lib/index.ts +44 -0
- package/lib/logger.ts +70 -0
- package/lib/memory-store.ts +168 -0
- package/lib/message-serializer.ts +110 -0
- package/lib/prompt-renderer.ts +94 -0
- package/lib/providers.ts +226 -0
- package/lib/streaming.ts +232 -0
- package/lib/token-tracker.ts +298 -0
- package/lib/tools-builder.ts +192 -0
- package/lib/tracer-callbacks.ts +342 -0
- package/lib/tracer.ts +350 -0
- package/migrations/001_langchain_memory.sql +83 -0
- package/migrations/002_token_usage.sql +127 -0
- package/migrations/003_observability.sql +257 -0
- package/package.json +28 -0
- package/plugin.config.ts +170 -0
- package/presets/lib/langchain.config.ts.preset +142 -0
- package/presets/templates/sector7/ai-observability/[traceId]/page.tsx +91 -0
- package/presets/templates/sector7/ai-observability/page.tsx +54 -0
- package/types/langchain.types.ts +274 -0
- package/types/observability.types.ts +270 -0
package/lib/graph/nodes/combiner.ts
@@ -0,0 +1,399 @@
/**
 * Combiner Node
 *
 * Converts JSON handler results into natural language response.
 * Single LLM call that synthesizes all results for the user.
 *
 * Optimization: For single-intent operations, can generate response
 * without LLM by using template-based formatting.
 */

import { HumanMessage, SystemMessage } from '@langchain/core/messages'
import { getModel } from '../../providers'
import { tracer } from '../../tracer'
import { config as pluginConfig } from '../../../plugin.config'
import type { OrchestratorState, HandlerResults } from '../types'
import { DEFAULT_GRAPH_CONFIG } from '../types'

// ============================================
// COMBINER PROMPT
// ============================================

const COMBINER_SYSTEM_PROMPT = `You are a response synthesizer that converts JSON operation results into natural language responses for users.

## Your Task

Given the original user request and the results from various operations, generate a clear, natural response that:
1. Summarizes ALL results
2. Uses the same language as the user (Spanish if they wrote in Spanish)
3. Is concise but complete
4. Includes relevant data (names, counts, specific values)

## Input Format

You receive JSON with:
- originalRequest: The user's original message
- results: Object containing handler results (task, customer, page)

Each result contains:
- success: Whether the operation succeeded
- operation: What was done (list, create, update, search, etc.)
- data: The actual data (array or single object)
- count: Number of items (for list/search)
- message: Technical description
- error: Error message if failed

## Output Format

Return ONLY the response text. No JSON, no markdown code blocks, just natural text.

## Rules

1. Match the user's language - If they wrote in Spanish, respond in Spanish
2. Be concise - Don't repeat unnecessary information
3. Format lists nicely - Use bullet points for multiple items (max 5-7 items, summarize if more)
4. Include key data - Account numbers, task titles, counts, specific fields requested
5. Handle errors gracefully - Explain what went wrong and offer alternatives
6. Don't expose technical details - No JSON, no error codes, no internal messages
7. For greetings, be friendly and mention what you can help with`

// ============================================
// TEMPLATE-BASED FORMATTERS (No LLM needed)
// ============================================

/**
 * Check if we can use template-based response (single, simple result)
 */
function canUseTemplateResponse(results: HandlerResults): boolean {
  const resultCount = Object.keys(results).filter((k) => results[k as keyof HandlerResults]).length

  // Only use templates for single results
  if (resultCount !== 1) return false

  // Check if it's a simple operation
  const result = results.task || results.customer || results.page
  if (!result) return false

  // Only list/search with small data sets can use templates
  if (result.operation === 'list' || result.operation === 'search') {
    const data = Array.isArray(result.data) ? result.data : []
    return data.length <= 5
  }

  // Single item operations (get, create, update) can use templates
  return ['get', 'create', 'update', 'delete'].includes(result.operation)
}

/**
 * Generate template-based response for tasks
 */
function formatTaskResponse(result: HandlerResults['task'], isSpanish: boolean): string {
  if (!result) return ''

  if (!result.success) {
    return isSpanish
      ? `No pude completar la operación: ${result.message}`
      : `I couldn't complete the operation: ${result.message}`
  }

  const { operation, data, count } = result

  if (operation === 'list' || operation === 'search') {
    const tasks = Array.isArray(data) ? data : []
    if (tasks.length === 0) {
      return isSpanish ? 'No se encontraron tareas.' : 'No tasks found.'
    }

    const header = isSpanish
      ? `Encontré ${count || tasks.length} tarea(s):`
      : `Found ${count || tasks.length} task(s):`

    const items = tasks
      .slice(0, 5)
      .map((t) => {
        const priority = t.priority ? ` (${t.priority})` : ''
        const status = t.status ? ` - ${t.status}` : ''
        return `• ${t.title}${priority}${status}`
      })
      .join('\n')

    const more = tasks.length > 5
      ? (isSpanish ? `\n... y ${tasks.length - 5} más` : `\n... and ${tasks.length - 5} more`)
      : ''

    return `${header}\n${items}${more}`
  }

  if (operation === 'create') {
    const task = Array.isArray(data) ? data[0] : data
    return isSpanish
      ? `Tarea creada: "${task?.title}"`
      : `Task created: "${task?.title}"`
  }

  if (operation === 'update') {
    const task = Array.isArray(data) ? data[0] : data
    return isSpanish
      ? `Tarea actualizada: "${task?.title}"`
      : `Task updated: "${task?.title}"`
  }

  if (operation === 'get') {
    const task = Array.isArray(data) ? data[0] : data
    if (!task) {
      return isSpanish ? 'Tarea no encontrada.' : 'Task not found.'
    }
    return isSpanish
      ? `Tarea: "${task.title}" - ${task.status || 'sin estado'}, prioridad ${task.priority || 'media'}`
      : `Task: "${task.title}" - ${task.status || 'no status'}, ${task.priority || 'medium'} priority`
  }

  return result.message
}

/**
 * Generate template-based response for customers
 */
function formatCustomerResponse(result: HandlerResults['customer'], isSpanish: boolean): string {
  if (!result) return ''

  if (!result.success) {
    return isSpanish
      ? `No pude completar la operación: ${result.message}`
      : `I couldn't complete the operation: ${result.message}`
  }

  const { operation, data, count } = result

  if (operation === 'search') {
    const customers = Array.isArray(data) ? data : []
    if (customers.length === 0) {
      return isSpanish
        ? 'No se encontraron clientes con ese criterio.'
        : 'No customers found matching that criteria.'
    }

    // For search, often looking for specific info
    if (customers.length === 1) {
      const c = customers[0]
      const info = []
      if (c.accountNumber) info.push(isSpanish ? `Cuenta: ${c.accountNumber}` : `Account: ${c.accountNumber}`)
      if (c.phone) info.push(isSpanish ? `Tel: ${c.phone}` : `Phone: ${c.phone}`)
      if (c.office) info.push(isSpanish ? `Oficina: ${c.office}` : `Office: ${c.office}`)

      return `${c.name}${info.length ? ' - ' + info.join(', ') : ''}`
    }

    const header = isSpanish
      ? `Encontré ${count || customers.length} cliente(s):`
      : `Found ${count || customers.length} customer(s):`

    const items = customers
      .slice(0, 5)
      .map((c) => `• ${c.name}${c.accountNumber ? ` (${c.accountNumber})` : ''}`)
      .join('\n')

    return `${header}\n${items}`
  }

  if (operation === 'list') {
    const customers = Array.isArray(data) ? data : []
    if (customers.length === 0) {
      return isSpanish ? 'No hay clientes registrados.' : 'No customers registered.'
    }

    const header = isSpanish
      ? `Hay ${count || customers.length} cliente(s):`
      : `There are ${count || customers.length} customer(s):`

    const items = customers
      .slice(0, 5)
      .map((c) => `• ${c.name}`)
      .join('\n')

    const more = customers.length > 5
      ? (isSpanish ? `\n... y ${customers.length - 5} más` : `\n... and ${customers.length - 5} more`)
      : ''

    return `${header}\n${items}${more}`
  }

  if (operation === 'create') {
    const customer = Array.isArray(data) ? data[0] : data
    return isSpanish
      ? `Cliente creado: "${customer?.name}"`
      : `Customer created: "${customer?.name}"`
  }

  return result.message
}

/**
 * Detect if input is in Spanish
 */
function isSpanishInput(input: string): boolean {
  const spanishIndicators = [
    'hola', 'muéstrame', 'muestrame', 'mis', 'tareas', 'clientes',
    'crear', 'buscar', 'encontrar', 'cuál', 'cual', 'qué', 'que',
    'número', 'numero', 'cuenta', 'por favor', 'gracias', 'dame'
  ]
  const lower = input.toLowerCase()
  return spanishIndicators.some((word) => lower.includes(word))
}

// ============================================
// COMBINER NODE
// ============================================

/**
 * Combiner node that synthesizes handler results into user response
 *
 * Optimization: Uses template-based responses for simple single operations,
 * falls back to LLM for complex multi-result scenarios.
 */
export async function combinerNode(
  state: OrchestratorState
): Promise<Partial<OrchestratorState>> {
  const { context, traceId, input, handlerResults, intents, needsClarification, clarificationQuestion } = state

  // Handle clarification (already has response)
  if (needsClarification && clarificationQuestion) {
    return { finalResponse: clarificationQuestion }
  }

  // Handle greeting
  if (intents.length === 1 && intents[0].type === 'greeting') {
    const isSpanish = isSpanishInput(input)
    const greeting = isSpanish
      ? '¡Hola! ¿En qué puedo ayudarte? Puedo gestionar tareas, buscar clientes o consultar páginas.'
      : 'Hello! How can I help you? I can manage tasks, search customers, or look up pages.'
    return { finalResponse: greeting }
  }

  // Check if empty results
  const hasResults = Object.values(handlerResults).some((r) => r !== undefined)
  if (!hasResults) {
    const isSpanish = isSpanishInput(input)
    return {
      finalResponse: isSpanish
        ? 'No pude procesar tu solicitud. ¿Podrías ser más específico?'
        : "I couldn't process your request. Could you be more specific?",
    }
  }

  const isSpanish = isSpanishInput(input)

  // Try template-based response for simple cases
  if (canUseTemplateResponse(handlerResults)) {
    if (pluginConfig.debug) {
      console.log('[Combiner] Using template-based response')
    }

    let response = ''

    if (handlerResults.task) {
      response = formatTaskResponse(handlerResults.task, isSpanish)
    } else if (handlerResults.customer) {
      response = formatCustomerResponse(handlerResults.customer, isSpanish)
    } else if (handlerResults.page) {
      // Simple page response
      const pageResult = handlerResults.page
      if (pageResult.success) {
        const pages = Array.isArray(pageResult.data) ? pageResult.data : [pageResult.data].filter(Boolean)
        response = isSpanish
          ? `Encontré ${pages.length} página(s): ${pages.map((p) => p?.title).join(', ')}`
          : `Found ${pages.length} page(s): ${pages.map((p) => p?.title).join(', ')}`
      } else {
        response = pageResult.message
      }
    }

    return { finalResponse: response }
  }

  // Use LLM for complex multi-result scenarios
  if (pluginConfig.debug) {
    console.log('[Combiner] Using LLM for multi-result response')
  }

  // Combiner uses OpenAI GPT-4o-mini (fast and cost-effective)
  // Uses real OpenAI API when LANGCHAIN_OPENAI_BASE_URL is not set,
  // or LM Studio when it is set
  const combinerModelConfig = {
    provider: 'openai' as const,
    model: 'gpt-4o-mini',
    temperature: DEFAULT_GRAPH_CONFIG.combinerTemperature,
  }

  // Start span for combiner LLM call with provider/model info
  const spanContext = traceId
    ? await tracer.startSpan(
        { userId: context.userId, teamId: context.teamId },
        traceId,
        {
          name: 'combiner',
          type: 'llm',
          provider: combinerModelConfig.provider,
          model: combinerModelConfig.model,
          input: { resultsCount: Object.keys(handlerResults).length },
        }
      )
    : null

  try {
    // Use OpenAI GPT-4o-mini for combining results (fast and cost-effective)
    const model = getModel(combinerModelConfig)

    const combinerInput = JSON.stringify({
      originalRequest: input,
      results: handlerResults,
    }, null, 2)

    const result = await model.invoke([
      new SystemMessage(COMBINER_SYSTEM_PROMPT),
      new HumanMessage(combinerInput),
    ])

    const response = typeof result.content === 'string'
      ? result.content
      : JSON.stringify(result.content)

    if (pluginConfig.debug) {
      console.log('[Combiner] Generated response:', response.substring(0, 100) + '...')
    }

    // End span with success
    if (spanContext && traceId) {
      await tracer.endSpan(
        { userId: context.userId, teamId: context.teamId },
        traceId,
        spanContext.spanId,
        {
          output: { responseLength: response.length },
        }
      )
    }

    return { finalResponse: response }
  } catch (error) {
    console.error('[Combiner] Error generating response:', error)

    // End span with error
    if (spanContext && traceId) {
      await tracer.endSpan(
        { userId: context.userId, teamId: context.teamId },
        traceId,
        spanContext.spanId,
        { error: error instanceof Error ? error : new Error(String(error)) }
      )
    }

    const isSpanish = isSpanishInput(state.input)
    return {
      finalResponse: isSpanish
        ? 'Ocurrió un error al procesar la respuesta. Por favor, intenta de nuevo.'
        : 'An error occurred while processing the response. Please try again.',
      error: error instanceof Error ? error.message : 'Combiner error',
    }
  }
}
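
For context, below is a minimal, hypothetical sketch of the pattern this node participates in: handler nodes return structured JSON into shared graph state, and a combiner node turns that state into a single user-facing reply. It is not taken from this package's orchestrator-graph.ts; the channel names (input, handlerResults, finalResponse), the stand-in handler, and the simplified combiner body are assumptions, and it assumes a recent @langchain/langgraph release with the Annotation/StateGraph API.

// Hypothetical wiring sketch (not part of this package diff).
// Assumes: @langchain/langgraph installed; simplified state channels.
import { StateGraph, Annotation, START, END } from '@langchain/langgraph'

const FlowState = Annotation.Root({
  input: Annotation<string>(),
  // Handler results keyed by domain, merged as handlers complete.
  handlerResults: Annotation<Record<string, unknown>>({
    reducer: (prev, next) => ({ ...prev, ...next }),
    default: () => ({}),
  }),
  finalResponse: Annotation<string>(),
})

// Stand-in for a domain handler: returns structured JSON, never user-facing text.
async function taskHandler(_state: typeof FlowState.State) {
  return {
    handlerResults: {
      task: { success: true, operation: 'list', data: [{ title: 'Ship beta' }], count: 1 },
    },
  }
}

// Stand-in for the combiner: converts structured results into one natural-language reply.
async function combiner(state: typeof FlowState.State) {
  const task = state.handlerResults.task as { count?: number } | undefined
  return { finalResponse: `Found ${task?.count ?? 0} task(s).` }
}

const graph = new StateGraph(FlowState)
  .addNode('taskHandler', taskHandler)
  .addNode('combiner', combiner)
  .addEdge(START, 'taskHandler')
  .addEdge('taskHandler', 'combiner')
  .addEdge('combiner', END)
  .compile()

// Usage: await graph.invoke({ input: 'show my tasks' })
// resolves with state whose finalResponse is 'Found 1 task(s).'

The real combinerNode above follows the same contract (read handler results from state, return { finalResponse }), with the added template-vs-LLM decision and tracer spans.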