@kognitivedev/vercel-ai-provider 0.1.8 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -36,6 +36,110 @@ describe("wrapStream logging", () => {
36
36
  );
37
37
  });
38
38
 
39
// Verifies the wrapStream middleware captures tool-call / tool-result stream
// chunks, reconstructs them into assistant + tool messages in the logged
// conversation, and emits a populated tool span.
it("should capture tool-call chunks and include them in logged conversation", async () => {
  // Mock model streams: text ("Let me check"), one tool call + its result,
  // then a finish part carrying usage (V3 stream-part shapes).
  const mockModel = new MockLanguageModelV3({
    doStream: async () => ({
      stream: convertArrayToReadableStream([
        { type: "text-start" as const, id: "t1" },
        { type: "text-delta" as const, id: "t1", delta: "Let me check" },
        { type: "text-end" as const, id: "t1" },
        {
          type: "tool-call" as const,
          toolCallId: "call-1",
          toolName: "get_weather",
          input: '{"city":"London"}',
        },
        {
          type: "tool-result" as const,
          toolCallId: "call-1",
          toolName: "get_weather",
          result: { temperature: 15, unit: "celsius" },
        },
        {
          type: "finish" as const,
          finishReason: {
            unified: "tool-calls" as const,
            raw: undefined,
          },
          usage: {
            inputTokens: { total: 20, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
            outputTokens: { total: 15, text: undefined, reasoning: undefined },
          },
        },
      ] satisfies import("@ai-sdk/provider").LanguageModelV3StreamPart[]),
    }),
  });

  const mockProvider = () => mockModel;

  // processDelayMs: 0 so background processing fires immediately;
  // logLevel "none" keeps test output quiet.
  const cl = createCognitiveLayer({
    provider: mockProvider,
    clConfig: {
      apiKey: "test-api-key",
      appId: "test-app",
      projectId: "test-project",
      processDelayMs: 0,
      logLevel: "none",
    },
  });

  // Valid userId/sessionId are required for the middleware to log at all.
  const model = cl("mock-model", {
    userId: "user-1",
    projectId: "project-1",
    sessionId: "session-1",
  });

  const result = streamText({
    model,
    messages: [{ role: "user", content: "What's the weather in London?" }],
  });

  // Fully consume the stream
  await result.text;

  // Wait for async logConversation to complete
  await new Promise((r) => setTimeout(r, 100));

  // Find the log call
  const logCall = fetchCalls.find((c) => c.url.includes("/api/cognitive/log"));
  expect(logCall).toBeDefined();

  const messages = logCall!.body.messages;

  // Assistant message should contain text + tool-call parts
  const assistantMsg = messages.find((m: any) => m.role === "assistant");
  expect(assistantMsg).toBeDefined();
  expect(assistantMsg.content).toEqual([
    { type: "text", text: "Let me check" },
    {
      type: "tool-call",
      toolCallId: "call-1",
      toolName: "get_weather",
      input: '{"city":"London"}',
    },
  ]);

  // Tool results should be in a separate tool message
  const toolMsg = messages.find((m: any) => m.role === "tool");
  expect(toolMsg).toBeDefined();
  expect(toolMsg.content).toEqual([
    {
      type: "tool-result",
      toolCallId: "call-1",
      toolName: "get_weather",
      result: { temperature: 15, unit: "celsius" },
    },
  ]);

  // Spans should include the tool call with populated previews
  const spans = logCall!.body.spans;
  const toolSpan = spans?.find((s: any) => s.spanType === "tool");
  expect(toolSpan).toBeDefined();
  expect(toolSpan.toolName).toBe("get_weather");
  expect(toolSpan.inputPreview).toContain("London");
  expect(toolSpan.outputPreview).toContain("15");
});
142
+
39
143
  it("should include assistant message in logged conversation after streaming", async () => {
40
144
  const mockModel = new MockLanguageModelV3({
41
145
  doStream: async () => ({
@@ -0,0 +1,4 @@
1
// Ambient declaration for the pre-built Handlebars CJS bundle (imported by
// src/template.ts to sidestep bundler warnings). It simply re-exports the
// default export of the typed "handlebars" root package, so the dist path
// gets the same type surface as the normal entry point.
declare module "handlebars/dist/cjs/handlebars" {
  import Handlebars from "handlebars";
  export default Handlebars;
}
package/src/index.ts CHANGED
@@ -4,6 +4,9 @@ import {
4
4
  generateText as aiGenerateText,
5
5
  type LanguageModel,
6
6
  } from "ai";
7
+ import { randomUUID } from "crypto";
8
+ export { renderTemplate, type TemplateVariables } from "./template";
9
+ import { renderTemplate } from "./template";
7
10
 
8
11
  /**
9
12
  * Log levels for controlling verbosity of CognitiveLayer logging.
@@ -21,6 +24,17 @@ function isValidId(value: string | undefined | null): value is string {
21
24
  return trimmed !== "" && trimmed !== "null" && trimmed !== "undefined";
22
25
  }
23
26
 
27
+ function maskSecret(secret: string | undefined | null): string {
28
+ if (!secret) return "missing";
29
+ if (secret.length <= 8) return `${secret.slice(0, 2)}***`;
30
+ return `${secret.slice(0, 4)}...${secret.slice(-4)}`;
31
+ }
32
+
33
+ function previewText(value: string, maxLength = 240): string {
34
+ if (value.length <= maxLength) return value;
35
+ return `${value.slice(0, maxLength)}...`;
36
+ }
37
+
24
38
  const LOG_LEVEL_PRIORITY: Record<LogLevel, number> = {
25
39
  none: 0,
26
40
  error: 1,
@@ -89,7 +103,7 @@ export type CLModelWrapper = (
89
103
 
90
104
  export interface PromptConfig {
91
105
  slug: string;
92
- variables?: Record<string, string>;
106
+ variables?: Record<string, string | boolean>;
93
107
  }
94
108
 
95
109
  export type CLStreamTextOptions = Omit<Parameters<typeof aiStreamText>[0], 'system' | 'prompt'> & {
@@ -111,6 +125,27 @@ export interface LogConversationPayload {
111
125
  promptSlug?: string;
112
126
  promptVersion?: number;
113
127
  promptId?: string;
128
+ traceId?: string;
129
+ parentSpanId?: string;
130
+ requestPreview?: string;
131
+ responsePreview?: string;
132
+ state?: "active" | "completed" | "error";
133
+ startedAt?: string;
134
+ endedAt?: string;
135
+ durationMs?: number;
136
+ metadata?: Record<string, unknown>;
137
+ spans?: Array<{
138
+ spanKey: string;
139
+ parentSpanKey?: string;
140
+ name: string;
141
+ spanType: string;
142
+ status?: "active" | "completed" | "error";
143
+ inputPreview?: string;
144
+ outputPreview?: string;
145
+ toolName?: string;
146
+ errorMessage?: string;
147
+ metadata?: Record<string, unknown>;
148
+ }>;
114
149
  }
115
150
 
116
151
  export type CognitiveLayer = CLModelWrapper & {
@@ -136,12 +171,109 @@ export interface CachedPrompt {
136
171
 
137
172
  const PROMPT_CACHE_TTL_MS = 60_000; // 1 minute
138
173
 
174
+ function getContentText(content: any): string {
175
+ if (typeof content === "string") return content;
176
+ if (!Array.isArray(content)) return "";
177
+
178
+ return content.map((part) => {
179
+ if (!part || typeof part !== "object") return "";
180
+ if (typeof part.text === "string") return part.text;
181
+ if (part.type === "tool-call" && typeof part.toolName === "string") return `Called ${part.toolName}`;
182
+ if (part.type === "tool-result") return "Received tool result";
183
+ return "";
184
+ }).filter(Boolean).join(" ");
185
+ }
186
+
139
187
  /**
140
- * Interpolate {{variable}} placeholders in a template string.
141
- * Unmatched variables are left as-is.
188
+ * Unwraps V2/V3 ToolResultOutput discriminated union to a displayable value.
189
+ * Stream ToolResult uses plain `result` (passthrough), while prompt ToolResultPart
190
+ * uses `output` with a discriminated union: text, json, error-text, error-json, content, execution-denied.
142
191
  */
143
- function interpolateTemplate(content: string, variables: Record<string, string>): string {
144
- return content.replace(/\{\{(\w+)\}\}/g, (_, key) => variables[key] ?? `{{${key}}}`);
192
+ function extractOutputValue(raw: unknown): unknown {
193
+ if (raw == null) return raw;
194
+ if (typeof raw !== 'object') return raw;
195
+ const obj = raw as Record<string, unknown>;
196
+ if (typeof obj.type !== 'string') return raw;
197
+ switch (obj.type) {
198
+ case 'text':
199
+ case 'json':
200
+ case 'error-text':
201
+ case 'error-json':
202
+ case 'content':
203
+ return obj.value;
204
+ case 'execution-denied':
205
+ return `Execution denied: ${obj.reason ?? 'unknown'}`;
206
+ default:
207
+ return raw;
208
+ }
209
+ }
210
+
211
+ function buildTracePreviews(messages: any[]): { requestPreview: string; responsePreview: string } {
212
+ const request = [...messages].reverse().find((message) => message?.role === "user");
213
+ const response = [...messages].reverse().find((message) => message?.role === "assistant");
214
+
215
+ return {
216
+ requestPreview: request ? getContentText(request.content).slice(0, 220) : "No request captured",
217
+ responsePreview: response ? getContentText(response.content).slice(0, 240) : "No response captured",
218
+ };
219
+ }
220
+
221
+ function buildTraceSpansFromMessages(messages: any[]): Array<{
222
+ spanKey: string;
223
+ parentSpanKey?: string;
224
+ name: string;
225
+ spanType: string;
226
+ status?: "active" | "completed" | "error";
227
+ inputPreview?: string;
228
+ outputPreview?: string;
229
+ toolName?: string;
230
+ errorMessage?: string;
231
+ metadata?: Record<string, unknown>;
232
+ }> {
233
+ const resultMap = new Map<string, unknown>();
234
+
235
+ for (const message of messages) {
236
+ if (!Array.isArray(message?.content)) continue;
237
+ for (const part of message.content) {
238
+ if (part?.type === "tool-result" && typeof part.toolCallId === "string") {
239
+ resultMap.set(part.toolCallId, part.result ?? part.output);
240
+ }
241
+ }
242
+ }
243
+
244
+ const spans: Array<{
245
+ spanKey: string;
246
+ parentSpanKey?: string;
247
+ name: string;
248
+ spanType: string;
249
+ status?: "active" | "completed" | "error";
250
+ inputPreview?: string;
251
+ outputPreview?: string;
252
+ toolName?: string;
253
+ errorMessage?: string;
254
+ metadata?: Record<string, unknown>;
255
+ }> = [];
256
+
257
+ for (const message of messages) {
258
+ if (!Array.isArray(message?.content)) continue;
259
+ for (const part of message.content) {
260
+ if (part?.type === "tool-call" && typeof part.toolCallId === "string") {
261
+ const result = resultMap.get(part.toolCallId);
262
+ spans.push({
263
+ spanKey: part.toolCallId,
264
+ parentSpanKey: "root",
265
+ name: typeof part.toolName === "string" ? part.toolName : "tool",
266
+ spanType: "tool",
267
+ status: "completed",
268
+ inputPreview: JSON.stringify(part.input ?? {}).slice(0, 220),
269
+ outputPreview: result != null ? JSON.stringify(extractOutputValue(result)).slice(0, 220) : "No tool result captured",
270
+ toolName: typeof part.toolName === "string" ? part.toolName : undefined,
271
+ });
272
+ }
273
+ }
274
+ }
275
+
276
+ return spans;
145
277
  }
146
278
 
147
279
  // Session-scoped snapshot cache: sessionKey → formatted memory block
@@ -206,11 +338,32 @@ export function createCognitiveLayer(config: {
206
338
  url.searchParams.set("slug", slug);
207
339
  if (userId) url.searchParams.set("userId", userId);
208
340
 
341
+ logger.debug("Resolving prompt from backend", {
342
+ slug,
343
+ userId,
344
+ url: url.toString(),
345
+ baseUrl,
346
+ apiKeyHint: maskSecret(clConfig.apiKey),
347
+ });
348
+
209
349
  const res = await fetch(url.toString(), {
210
350
  headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
211
351
  });
352
+ logger.debug("Prompt resolve response received", {
353
+ slug,
354
+ userId,
355
+ status: res.status,
356
+ ok: res.ok,
357
+ contentType: res.headers.get("content-type"),
358
+ });
212
359
  if (!res.ok) {
213
360
  const body = await res.text();
361
+ logger.debug("Prompt resolve response body preview", {
362
+ slug,
363
+ userId,
364
+ status: res.status,
365
+ bodyPreview: previewText(body),
366
+ });
214
367
  throw new Error(`Failed to resolve prompt "${slug}": ${res.status} ${body}`);
215
368
  }
216
369
 
@@ -224,6 +377,14 @@ export function createCognitiveLayer(config: {
224
377
  gatewaySlug: data.gatewaySlug,
225
378
  };
226
379
  promptCache.set(cacheKey, entry);
380
+ logger.debug("Prompt resolved payload", {
381
+ slug,
382
+ resolvedSlug: entry.slug,
383
+ version: entry.version,
384
+ promptId: entry.promptId,
385
+ contentLength: entry.content.length,
386
+ gatewaySlug: entry.gatewaySlug ?? null,
387
+ });
227
388
  logger.info("Prompt resolved", { slug, version: entry.version });
228
389
  return entry;
229
390
  };
@@ -306,9 +467,25 @@ export function createCognitiveLayer(config: {
306
467
  if (systemPromptToAdd === undefined) {
307
468
  try {
308
469
  const url = `${baseUrl}/api/cognitive/snapshot?userId=${userId}`;
470
+ logger.debug("Fetching snapshot from backend", {
471
+ userId,
472
+ projectId,
473
+ sessionId,
474
+ url,
475
+ baseUrl,
476
+ apiKeyHint: maskSecret(clConfig.apiKey),
477
+ });
309
478
  const res = await fetch(url, {
310
479
  headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
311
480
  });
481
+ logger.debug("Snapshot response received", {
482
+ userId,
483
+ projectId,
484
+ sessionId,
485
+ status: res.status,
486
+ ok: res.ok,
487
+ contentType: res.headers.get("content-type"),
488
+ });
312
489
  if (res.ok) {
313
490
  const data = await res.json();
314
491
  const systemBlock = data.systemBlock || "";
@@ -342,7 +519,15 @@ ${userContextBlock || "None"}
342
519
  rawData: data,
343
520
  });
344
521
  } else {
522
+ const body = await res.text();
345
523
  logger.warn("Snapshot fetch failed", { status: res.status });
524
+ logger.debug("Snapshot response body preview", {
525
+ userId,
526
+ projectId,
527
+ sessionId,
528
+ status: res.status,
529
+ bodyPreview: previewText(body),
530
+ });
346
531
  systemPromptToAdd = "";
347
532
  sessionSnapshots.set(sessionKey, systemPromptToAdd);
348
533
  }
@@ -375,6 +560,7 @@ ${userContextBlock || "None"}
375
560
  },
376
561
 
377
562
  async wrapGenerate({ doGenerate, params }: { doGenerate: any; params: any }) {
563
+ const startedAt = new Date();
378
564
  let result;
379
565
  try {
380
566
  result = await doGenerate();
@@ -385,17 +571,40 @@ ${userContextBlock || "None"}
385
571
  }
386
572
 
387
573
  if (isValidId(userId) && isValidId(sessionId)) {
574
+ const endedAt = new Date();
388
575
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
389
576
  const promptMeta = sessionPromptMetadata.get(sessionKey);
390
577
 
391
- const messagesInput = (params as any).messages || (params as any).prompt || [];
392
- const resultMessages = (result as any)?.response?.messages;
393
- const assistantMessage = (result as any)?.text
394
- ? [{ role: "assistant", content: [{ type: "text", text: (result as any).text }] }]
578
+ const messagesInput = (params as any).prompt || (params as any).messages || [];
579
+
580
+ // Build assistant message from result.content (V2/V3 GenerateResult)
581
+ const resultContent = Array.isArray(result?.content) ? result.content : [];
582
+ const assistantParts: any[] = [];
583
+ for (const part of resultContent) {
584
+ if (part?.type === 'text') {
585
+ assistantParts.push({ type: 'text', text: part.text });
586
+ } else if (part?.type === 'tool-call') {
587
+ assistantParts.push({
588
+ type: 'tool-call',
589
+ toolCallId: part.toolCallId,
590
+ toolName: part.toolName,
591
+ input: part.input,
592
+ });
593
+ } else if (part?.type === 'tool-result') {
594
+ assistantParts.push({
595
+ type: 'tool-result',
596
+ toolCallId: part.toolCallId,
597
+ toolName: part.toolName,
598
+ result: part.result,
599
+ });
600
+ }
601
+ }
602
+ const assistantMessage = assistantParts.length > 0
603
+ ? [{ role: "assistant", content: assistantParts }]
395
604
  : [];
396
- const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
397
- ? resultMessages
398
- : [...messagesInput, ...assistantMessage];
605
+ const finalMessages = [...messagesInput, ...assistantMessage];
606
+ const { requestPreview, responsePreview } = buildTracePreviews(finalMessages);
607
+ const spans = buildTraceSpansFromMessages(finalMessages);
399
608
 
400
609
  logConversation({
401
610
  userId,
@@ -409,12 +618,25 @@ ${userContextBlock || "None"}
409
618
  promptVersion: promptMeta.promptVersion,
410
619
  promptId: promptMeta.promptId,
411
620
  }),
621
+ traceId: randomUUID(),
622
+ requestPreview,
623
+ responsePreview,
624
+ state: "completed",
625
+ startedAt: startedAt.toISOString(),
626
+ endedAt: endedAt.toISOString(),
627
+ durationMs: endedAt.getTime() - startedAt.getTime(),
628
+ metadata: {
629
+ appId: clConfig.appId,
630
+ },
631
+ spans,
412
632
  }).then(() => triggerProcessing(userId, projectId, sessionId));
413
633
  }
414
634
 
415
635
  return result;
416
636
  },
417
637
  async wrapStream({ doStream, params }: { doStream: any; params: any }) {
638
+ const startedAt = new Date();
639
+ const traceId = randomUUID();
418
640
  let result;
419
641
  try {
420
642
  logger.debug("Starting doStream with params", JSON.stringify(params, null, 2));
@@ -431,7 +653,7 @@ ${userContextBlock || "None"}
431
653
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
432
654
  const promptMeta = sessionPromptMetadata.get(sessionKey);
433
655
 
434
- const messagesInput = (params as any).messages || (params as any).prompt || [];
656
+ const messagesInput = (params as any).prompt || (params as any).messages || [];
435
657
  const resultMessages = (result as any)?.response?.messages;
436
658
  const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
437
659
  ? resultMessages
@@ -439,6 +661,9 @@ ${userContextBlock || "None"}
439
661
 
440
662
  let streamUsage: Record<string, unknown> | undefined;
441
663
  let accumulatedText = '';
664
+ const toolCallInputs = new Map<string, { toolName: string; chunks: string[] }>();
665
+ const completedToolCalls: any[] = [];
666
+ const completedToolResults: any[] = [];
442
667
 
443
668
  const originalStream = result.stream;
444
669
  const transformStream = new TransformStream({
@@ -449,14 +674,64 @@ ${userContextBlock || "None"}
449
674
  if (chunk.type === 'finish' && chunk.usage) {
450
675
  streamUsage = chunk.usage;
451
676
  }
677
+ // Capture tool-call stream chunks (V2/V3 shared types)
678
+ if (chunk.type === 'tool-input-start') {
679
+ toolCallInputs.set(chunk.id, { toolName: chunk.toolName, chunks: [] });
680
+ }
681
+ if (chunk.type === 'tool-input-delta') {
682
+ const entry = toolCallInputs.get(chunk.id);
683
+ if (entry) entry.chunks.push(chunk.delta);
684
+ }
685
+ if (chunk.type === 'tool-call') {
686
+ completedToolCalls.push({
687
+ type: 'tool-call',
688
+ toolCallId: chunk.toolCallId,
689
+ toolName: chunk.toolName,
690
+ input: chunk.input,
691
+ });
692
+ }
693
+ if (chunk.type === 'tool-result') {
694
+ completedToolResults.push({
695
+ type: 'tool-result',
696
+ toolCallId: chunk.toolCallId,
697
+ toolName: chunk.toolName,
698
+ result: chunk.result,
699
+ });
700
+ }
452
701
  controller.enqueue(chunk);
453
702
  },
454
- flush() {
455
- const allMessages = accumulatedText
456
- ? [...finalMessages, { role: "assistant", content: [{ type: "text", text: accumulatedText }] }]
703
+ async flush() {
704
+ const endedAt = new Date();
705
+
706
+ // Finalize any tool calls from incremental input chunks
707
+ for (const [id, entry] of toolCallInputs) {
708
+ // Only add if not already captured via a tool-call chunk
709
+ if (!completedToolCalls.some((tc: any) => tc.toolCallId === id)) {
710
+ completedToolCalls.push({
711
+ type: 'tool-call',
712
+ toolCallId: id,
713
+ toolName: entry.toolName,
714
+ input: entry.chunks.join(''),
715
+ });
716
+ }
717
+ }
718
+
719
+ const assistantParts: any[] = [];
720
+ if (accumulatedText) assistantParts.push({ type: "text", text: accumulatedText });
721
+ for (const tc of completedToolCalls) assistantParts.push(tc);
722
+
723
+ const allMessages = assistantParts.length > 0
724
+ ? [...finalMessages, { role: "assistant", content: assistantParts }]
457
725
  : finalMessages;
458
726
 
459
- logConversation({
727
+ if (completedToolResults.length > 0) {
728
+ allMessages.push({ role: "tool", content: completedToolResults });
729
+ }
730
+
731
+ const { requestPreview, responsePreview } = buildTracePreviews(allMessages);
732
+ const spans = buildTraceSpansFromMessages(allMessages);
733
+
734
+ await logConversation({
460
735
  userId,
461
736
  projectId,
462
737
  sessionId,
@@ -468,7 +743,19 @@ ${userContextBlock || "None"}
468
743
  promptVersion: promptMeta.promptVersion,
469
744
  promptId: promptMeta.promptId,
470
745
  }),
471
- }).then(() => triggerProcessing(userId, projectId, sessionId));
746
+ traceId,
747
+ requestPreview,
748
+ responsePreview,
749
+ state: "completed",
750
+ startedAt: startedAt.toISOString(),
751
+ endedAt: endedAt.toISOString(),
752
+ durationMs: endedAt.getTime() - startedAt.getTime(),
753
+ metadata: {
754
+ appId: clConfig.appId,
755
+ },
756
+ spans,
757
+ });
758
+ triggerProcessing(userId, projectId, sessionId);
472
759
  }
473
760
  });
474
761
 
@@ -558,7 +845,7 @@ ${userContextBlock || "None"}
558
845
  let system: string | undefined;
559
846
  if (resolved) {
560
847
  system = promptConfig.variables
561
- ? interpolateTemplate(resolved.content, promptConfig.variables)
848
+ ? renderTemplate(resolved.content, promptConfig.variables)
562
849
  : resolved.content;
563
850
 
564
851
  // Store prompt metadata for the session (read by middleware during logging)
@@ -602,7 +889,7 @@ ${userContextBlock || "None"}
602
889
  let system: string | undefined;
603
890
  if (resolved) {
604
891
  system = promptConfig.variables
605
- ? interpolateTemplate(resolved.content, promptConfig.variables)
892
+ ? renderTemplate(resolved.content, promptConfig.variables)
606
893
  : resolved.content;
607
894
 
608
895
  // Store prompt metadata for the session (read by middleware during logging)
@@ -0,0 +1,10 @@
1
+ // Use the pre-built dist to avoid `require.extensions` warning in webpack/Next.js
2
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
3
+ import Handlebars from "handlebars/dist/cjs/handlebars";
4
+
5
+ export type TemplateVariables = Record<string, string | boolean>;
6
+
7
+ export function renderTemplate(template: string, variables: TemplateVariables): string {
8
+ const compiled = Handlebars.compile(template, { noEscape: true });
9
+ return compiled(variables);
10
+ }