genai-lite 0.1.4 → 0.2.0

package/README.md CHANGED
@@ -296,14 +296,14 @@ import type {

## Utilities

- genai-lite includes useful utilities for working with LLMs, available through the `genai-lite/utils` subpath:
+ genai-lite includes useful utilities for working with LLMs, available through the `genai-lite/prompting` subpath:

### Token Counting

Count the number of tokens in a string using OpenAI's tiktoken library:

```typescript
- import { countTokens } from 'genai-lite/utils';
+ import { countTokens } from 'genai-lite/prompting';

const text = 'Hello, this is a sample text for token counting.';
const tokenCount = countTokens(text); // Uses gpt-4 tokenizer by default
@@ -320,7 +320,7 @@ const gpt35Tokens = countTokens(text, 'gpt-3.5-turbo');
Generate intelligent previews of large text blocks that preserve context:

```typescript
- import { getSmartPreview } from 'genai-lite/utils';
+ import { getSmartPreview } from 'genai-lite/prompting';

const largeCodeFile = `
function calculateTotal(items) {
@@ -359,7 +359,7 @@ Combine these utilities to build prompts that fit within model context windows:

```typescript
import { LLMService, fromEnvironment } from 'genai-lite';
- import { countTokens, getSmartPreview } from 'genai-lite/utils';
+ import { countTokens, getSmartPreview } from 'genai-lite/prompting';

const llm = new LLMService(fromEnvironment);

@@ -394,7 +394,7 @@ const response = await llm.sendMessage({
Generate dynamic prompts and content using the built-in template engine that supports variable substitution and conditional logic:

```typescript
- import { renderTemplate } from 'genai-lite/utils';
+ import { renderTemplate } from 'genai-lite/prompting';

// Simple variable substitution
const greeting = renderTemplate('Hello, {{ name }}!', { name: 'World' });
@@ -432,7 +432,7 @@ const result = renderTemplate(complexTemplate, {
expertise: 'TypeScript, React, Node.js',
task: 'Review the code for best practices',
hasFiles: true,
- fileList: '- src/index.ts\n- src/utils.ts',
+ fileList: '- src/index.ts\n- src/prompting/template.ts',
requiresOutput: false
});
```
@@ -450,7 +450,7 @@ Combine the template engine with other utilities for powerful prompt generation:

```typescript
import { LLMService, fromEnvironment } from 'genai-lite';
- import { renderTemplate, countTokens } from 'genai-lite/utils';
+ import { renderTemplate, countTokens } from 'genai-lite/prompting';

const llm = new LLMService(fromEnvironment);

@@ -491,6 +491,138 @@ const response = await llm.sendMessage({
});
```

+ ### Prompt Builder Utilities
+
+ genai-lite provides powerful utilities for building and parsing structured prompts:
+
+ #### Parsing Messages from Templates
+
+ Convert template strings with role tags into LLM message arrays:
+
+ ```typescript
+ import { buildMessagesFromTemplate } from 'genai-lite/prompting';
+
+ const template = `
+ <SYSTEM>You are a helpful assistant specialized in {{expertise}}.</SYSTEM>
+ <USER>Help me with {{task}}</USER>
+ <ASSISTANT>I'll help you with {{task}}. Let me analyze the requirements...</ASSISTANT>
+ <USER>Can you provide more details?</USER>
+ `;
+
+ const messages = buildMessagesFromTemplate(template, {
+ expertise: 'TypeScript and React',
+ task: 'building a custom hook'
+ });
+
+ // Result: Array of LLMMessage objects ready for the API
+ // [
+ //   { role: 'system', content: 'You are a helpful assistant specialized in TypeScript and React.' },
+ //   { role: 'user', content: 'Help me with building a custom hook' },
+ //   { role: 'assistant', content: "I'll help you with building a custom hook. Let me analyze..." },
+ //   { role: 'user', content: 'Can you provide more details?' }
+ // ]
+ ```
+
+ #### Extracting Random Variables for Few-Shot Learning
+
+ Implement few-shot prompting by extracting and shuffling examples:
+
+ ```typescript
+ import { extractRandomVariables, renderTemplate } from 'genai-lite/prompting';
+
+ // Define examples in your template
+ const examplesTemplate = `
+ <RANDOM_INPUT>User: Translate "hello" to Spanish</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "hello" to Spanish is "hola".</RANDOM_OUTPUT>
+
+ <RANDOM_INPUT>User: Translate "goodbye" to French</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "goodbye" to French is "au revoir".</RANDOM_OUTPUT>
+
+ <RANDOM_INPUT>User: Translate "thank you" to German</RANDOM_INPUT>
+ <RANDOM_OUTPUT>Assistant: The translation of "thank you" to German is "danke".</RANDOM_OUTPUT>
+ `;
+
+ // Extract random variables (shuffled each time)
+ const variables = extractRandomVariables(examplesTemplate, { maxPerTag: 2 });
+
+ // Use in a prompt template
+ const promptTemplate = `
+ You are a translation assistant. Here are some examples:
+
+ {{ random_input_1 }}
+ {{ random_output_1 }}
+
+ {{ random_input_2 }}
+ {{ random_output_2 }}
+
+ Now translate: "{{word}}" to {{language}}
+ `;
+
+ const prompt = renderTemplate(promptTemplate, {
+ ...variables,
+ word: 'please',
+ language: 'Italian'
+ });
+ ```
+
+ #### Parsing Structured LLM Responses
+
+ Extract structured data from LLM responses using custom tags:
+
+ ```typescript
+ import { parseStructuredContent } from 'genai-lite/prompting';
+
+ // Example LLM response with structured output
+ const llmResponse = `
+ Let me analyze this code for you.
+
+ <ANALYSIS>
+ The code has good structure but could benefit from:
+ 1. Better error handling in the API calls
+ 2. Memoization for expensive computations
+ 3. More descriptive variable names
+ </ANALYSIS>
+
+ <SUGGESTIONS>
+ - Add try-catch blocks around async operations
+ - Use React.memo() for the expensive component
+ - Rename 'data' to 'userData' for clarity
+ </SUGGESTIONS>
+
+ <REFACTORED_CODE>
+ const UserProfile = React.memo(({ userId }) => {
+   const [userData, setUserData] = useState(null);
+
+   useEffect(() => {
+     fetchUserData(userId)
+       .then(setUserData)
+       .catch(error => console.error('Failed to load user:', error));
+   }, [userId]);
+
+   return userData ? <Profile data={userData} /> : <Loading />;
+ });
+ </REFACTORED_CODE>
+ `;
+
+ // Parse the structured content
+ const parsed = parseStructuredContent(llmResponse, [
+ 'ANALYSIS',
+ 'SUGGESTIONS',
+ 'REFACTORED_CODE'
+ ]);
+
+ console.log(parsed.ANALYSIS); // The analysis text
+ console.log(parsed.SUGGESTIONS); // The suggestions text
+ console.log(parsed.REFACTORED_CODE); // The refactored code
+ ```
+
+ These prompt builder utilities enable:
+ - **Structured Conversations**: Build multi-turn conversations from templates
+ - **Few-Shot Learning**: Randomly sample examples to improve AI responses
+ - **Reliable Output Parsing**: Extract specific sections from AI responses
+ - **Template Reusability**: Define templates once, use with different variables
+ - **Type Safety**: Full TypeScript support with LLMMessage types
+
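For orientation, the following sketch shows how the new builder and parser exports can be combined end to end. It is not part of the shipped README: the template text, tag names, and the hard-coded stand-in for the model reply are invented for illustration, and the actual `LLMService.sendMessage` call (shown in earlier README examples) is omitted.

```typescript
import { buildMessagesFromTemplate, parseStructuredContent } from 'genai-lite/prompting';

// 1. Build the outgoing conversation from a role-tagged template.
const messages = buildMessagesFromTemplate(
  `
  <SYSTEM>You are a code reviewer. Reply inside <REVIEW> and <VERDICT> tags.</SYSTEM>
  <USER>Please review: {{ snippet }}</USER>
  `,
  { snippet: 'const total = price * quantity;' }
);

// 2. Send `messages` via LLMService.sendMessage(...) as shown earlier;
//    replyText below is a hard-coded stand-in for a real model response.
const replyText = '<REVIEW>Simple and correct.</REVIEW>\n<VERDICT>approve</VERDICT>';

// 3. Pull the tagged sections back out of the reply.
const { REVIEW, VERDICT } = parseStructuredContent(replyText, ['REVIEW', 'VERDICT']);
console.log(REVIEW, VERDICT);
```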
## Contributing

Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
package/dist/index.d.ts CHANGED
@@ -5,4 +5,7 @@ export type { ModelPreset } from "./types/presets";
export * from "./llm/types";
export * from "./llm/clients/types";
export { fromEnvironment } from "./providers/fromEnvironment";
- export { renderTemplate } from "./utils/templateEngine";
+ export { renderTemplate } from "./prompting/template";
+ export { countTokens, getSmartPreview, extractRandomVariables } from "./prompting/content";
+ export { buildMessagesFromTemplate } from "./prompting/builder";
+ export { parseStructuredContent } from "./prompting/parser";
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
- exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
+ exports.parseStructuredContent = exports.buildMessagesFromTemplate = exports.extractRandomVariables = exports.getSmartPreview = exports.countTokens = exports.renderTemplate = exports.fromEnvironment = exports.LLMService = void 0;
// --- LLM Service ---
var LLMService_1 = require("./llm/LLMService");
Object.defineProperty(exports, "LLMService", { enumerable: true, get: function () { return LLMService_1.LLMService; } });
@@ -26,5 +26,13 @@ __exportStar(require("./llm/clients/types"), exports);
var fromEnvironment_1 = require("./providers/fromEnvironment");
Object.defineProperty(exports, "fromEnvironment", { enumerable: true, get: function () { return fromEnvironment_1.fromEnvironment; } });
// --- Utilities ---
- var templateEngine_1 = require("./utils/templateEngine");
- Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return templateEngine_1.renderTemplate; } });
+ var template_1 = require("./prompting/template");
+ Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return template_1.renderTemplate; } });
+ var content_1 = require("./prompting/content");
+ Object.defineProperty(exports, "countTokens", { enumerable: true, get: function () { return content_1.countTokens; } });
+ Object.defineProperty(exports, "getSmartPreview", { enumerable: true, get: function () { return content_1.getSmartPreview; } });
+ Object.defineProperty(exports, "extractRandomVariables", { enumerable: true, get: function () { return content_1.extractRandomVariables; } });
+ var builder_1 = require("./prompting/builder");
+ Object.defineProperty(exports, "buildMessagesFromTemplate", { enumerable: true, get: function () { return builder_1.buildMessagesFromTemplate; } });
+ var parser_1 = require("./prompting/parser");
+ Object.defineProperty(exports, "parseStructuredContent", { enumerable: true, get: function () { return parser_1.parseStructuredContent; } });
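Judging from the re-exports added above, these helpers are exposed from the package's main entry point as well as the `genai-lite/prompting` subpath documented in the README. A minimal sketch, assuming the root entry resolves to this `dist/index.js`:

```typescript
// Both import paths should resolve to the same implementations,
// given the re-exports in dist/index.js above.
import { countTokens, buildMessagesFromTemplate } from 'genai-lite';
import { parseStructuredContent } from 'genai-lite/prompting';

const tokens = countTokens('How many tokens is this?');
```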
@@ -0,0 +1,34 @@
+ /**
+  * Prompt builder utilities for constructing structured LLM messages
+  *
+  * This module provides functions to construct the final, structured prompts
+  * that will be sent to the LLM service. This is the assembly step that turns
+  * templates and content into the format required by the LLMService.
+  */
+ import type { LLMMessage } from '../llm/types';
+ /**
+  * Builds an array of LLM messages from a template string with role tags.
+  *
+  * This function takes a template with <SYSTEM>, <USER>, and <ASSISTANT> tags
+  * and constructs a properly formatted array of LLMMessage objects ready to be
+  * sent to an LLM service.
+  *
+  * @param template The template string with {{variables}} and <ROLE> tags.
+  * @param variables An object with values to substitute into the template.
+  * @returns An array of LLMMessage objects.
+  *
+  * @example
+  * const template = `
+  * <SYSTEM>You are a helpful assistant specialized in {{expertise}}.</SYSTEM>
+  * <USER>Help me with {{task}}</USER>
+  * <ASSISTANT>I'll help you with {{task}}. Let me explain...</ASSISTANT>
+  * <USER>Can you provide more details?</USER>
+  * `;
+  *
+  * const messages = buildMessagesFromTemplate(template, {
+  *   expertise: 'TypeScript',
+  *   task: 'understanding generics'
+  * });
+  * // Returns an array of LLMMessage objects with roles and content
+  */
+ export declare function buildMessagesFromTemplate(template: string, variables?: Record<string, any>): LLMMessage[];
@@ -0,0 +1,112 @@
+ "use strict";
+ /**
+  * Prompt builder utilities for constructing structured LLM messages
+  *
+  * This module provides functions to construct the final, structured prompts
+  * that will be sent to the LLM service. This is the assembly step that turns
+  * templates and content into the format required by the LLMService.
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.buildMessagesFromTemplate = buildMessagesFromTemplate;
+ const template_1 = require("./template");
+ /**
+  * Extracts text content from XML-style tags and returns both the extracted content
+  * and the original string with those sections removed. Handles multiple occurrences
+  * of the same tag.
+  *
+  * @param xmlString String containing XML-style tags to process
+  * @param tagName Name of the tag to extract (without angle brackets)
+  * @returns Tuple containing:
+  *   - Array of extracted content strings, or null if no matches
+  *   - Original string with matched tags and content removed
+  */
+ function extractTextAndClean(xmlString, tagName) {
+     if (typeof xmlString !== 'string' || typeof tagName !== 'string') {
+         return [null, xmlString];
+     }
+     const matches = [];
+     const pattern = new RegExp(`<${tagName}>([\\s\\S]*?)<\/${tagName}>`, 'g');
+     let match;
+     let lastIndex = 0;
+     const segments = [];
+     while ((match = pattern.exec(xmlString)) !== null) {
+         if (lastIndex < match.index) {
+             segments.push(xmlString.slice(lastIndex, match.index));
+         }
+         matches.push(match[1]);
+         lastIndex = pattern.lastIndex;
+     }
+     if (lastIndex < xmlString.length) {
+         segments.push(xmlString.slice(lastIndex));
+     }
+     return matches.length > 0 ? [matches, segments.join('')] : [null, xmlString];
+ }
+ /**
+  * Builds an array of LLM messages from a template string with role tags.
+  *
+  * This function takes a template with <SYSTEM>, <USER>, and <ASSISTANT> tags
+  * and constructs a properly formatted array of LLMMessage objects ready to be
+  * sent to an LLM service.
+  *
+  * @param template The template string with {{variables}} and <ROLE> tags.
+  * @param variables An object with values to substitute into the template.
+  * @returns An array of LLMMessage objects.
+  *
+  * @example
+  * const template = `
+  * <SYSTEM>You are a helpful assistant specialized in {{expertise}}.</SYSTEM>
+  * <USER>Help me with {{task}}</USER>
+  * <ASSISTANT>I'll help you with {{task}}. Let me explain...</ASSISTANT>
+  * <USER>Can you provide more details?</USER>
+  * `;
+  *
+  * const messages = buildMessagesFromTemplate(template, {
+  *   expertise: 'TypeScript',
+  *   task: 'understanding generics'
+  * });
+  * // Returns an array of LLMMessage objects with roles and content
+  */
+ function buildMessagesFromTemplate(template, variables) {
+     try {
+         if (typeof template !== 'string') {
+             throw new Error('Template must be a string');
+         }
+         // First, render variables using the existing template engine
+         let processedContent = template;
+         if (variables) {
+             processedContent = (0, template_1.renderTemplate)(template, variables);
+         }
+         // Extract sections for each role
+         const [systemContent, afterSystem] = extractTextAndClean(processedContent, 'SYSTEM');
+         const [userContent, afterUser] = extractTextAndClean(afterSystem, 'USER');
+         const [assistantContent] = extractTextAndClean(afterUser, 'ASSISTANT');
+         const messages = [];
+         // Add system message if present
+         if (systemContent && systemContent.length > 0 && systemContent[0].trim()) {
+             messages.push({
+                 role: 'system',
+                 content: systemContent[0].trim()
+             });
+         }
+         // Interleave user and assistant messages
+         const maxLength = Math.max(userContent?.length ?? 0, assistantContent?.length ?? 0);
+         for (let i = 0; i < maxLength; i++) {
+             if (userContent && i < userContent.length && userContent[i].trim()) {
+                 messages.push({
+                     role: 'user',
+                     content: userContent[i].trim()
+                 });
+             }
+             if (assistantContent && i < assistantContent.length && assistantContent[i].trim()) {
+                 messages.push({
+                     role: 'assistant',
+                     content: assistantContent[i].trim()
+                 });
+             }
+         }
+         return messages;
+     }
+     catch (error) {
+         throw new Error(`Failed to build messages from template: ${error.message}`);
+     }
+ }
@@ -0,0 +1,4 @@
+ /**
+  * Tests for prompt builder utilities
+  */
+ export {};
@@ -0,0 +1,109 @@
+ "use strict";
+ /**
+  * Tests for prompt builder utilities
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const builder_1 = require("./builder");
+ describe('buildMessagesFromTemplate', () => {
+     it('should parse a simple template with one of each tag', () => {
+         const template = `
+ <SYSTEM>You are a helpful assistant.</SYSTEM>
+ <USER>Hello!</USER>
+ <ASSISTANT>Hi there! How can I help you?</ASSISTANT>
+ `;
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template);
+         expect(messages).toHaveLength(3);
+         expect(messages[0]).toEqual({
+             role: 'system',
+             content: 'You are a helpful assistant.'
+         });
+         expect(messages[1]).toEqual({
+             role: 'user',
+             content: 'Hello!'
+         });
+         expect(messages[2]).toEqual({
+             role: 'assistant',
+             content: 'Hi there! How can I help you?'
+         });
+     });
+     it('should handle multiple USER and ASSISTANT tags in order', () => {
+         const template = `
+ <SYSTEM>System message</SYSTEM>
+ <USER>First user message</USER>
+ <ASSISTANT>First assistant response</ASSISTANT>
+ <USER>Second user message</USER>
+ <ASSISTANT>Second assistant response</ASSISTANT>
+ `;
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template);
+         expect(messages).toHaveLength(5);
+         expect(messages[0].role).toBe('system');
+         expect(messages[1].role).toBe('user');
+         expect(messages[1].content).toBe('First user message');
+         expect(messages[2].role).toBe('assistant');
+         expect(messages[2].content).toBe('First assistant response');
+         expect(messages[3].role).toBe('user');
+         expect(messages[3].content).toBe('Second user message');
+         expect(messages[4].role).toBe('assistant');
+         expect(messages[4].content).toBe('Second assistant response');
+     });
+     it('should handle template with missing SYSTEM tag', () => {
+         const template = `
+ <USER>Hello</USER>
+ <ASSISTANT>Hi!</ASSISTANT>
+ `;
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template);
+         expect(messages).toHaveLength(2);
+         expect(messages[0].role).toBe('user');
+         expect(messages[1].role).toBe('assistant');
+     });
+     it('should substitute variables inside tags', () => {
+         const template = `
+ <SYSTEM>You are an expert in {{expertise}}.</SYSTEM>
+ <USER>Can you help me with {{topic}}?</USER>
+ <ASSISTANT>I'd be happy to help with {{topic}}!</ASSISTANT>
+ `;
+         const variables = {
+             expertise: 'TypeScript',
+             topic: 'generics'
+         };
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template, variables);
+         expect(messages[0].content).toBe('You are an expert in TypeScript.');
+         expect(messages[1].content).toBe('Can you help me with generics?');
+         expect(messages[2].content).toBe("I'd be happy to help with generics!");
+     });
+     it('should handle empty template string', () => {
+         const messages = (0, builder_1.buildMessagesFromTemplate)('');
+         expect(messages).toEqual([]);
+     });
+     it('should handle template with only whitespace in tags', () => {
+         const template = `
+ <SYSTEM>   </SYSTEM>
+ <USER>Valid message</USER>
+ <ASSISTANT>   </ASSISTANT>
+ `;
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template);
+         expect(messages).toHaveLength(1);
+         expect(messages[0].role).toBe('user');
+         expect(messages[0].content).toBe('Valid message');
+     });
+     it('should throw error for non-string template', () => {
+         expect(() => {
+             (0, builder_1.buildMessagesFromTemplate)(123);
+         }).toThrow('Template must be a string');
+     });
+     it('should handle complex nested variables with conditionals', () => {
+         const template = `
+ <SYSTEM>You are a {{role}}{{ specialized ? \` specialized in {{specialty}}\` : \`\` }}.</SYSTEM>
+ <USER>{{greeting}}</USER>
+ `;
+         const variables = {
+             role: 'assistant',
+             specialized: true,
+             specialty: 'code review',
+             greeting: 'Hello!'
+         };
+         const messages = (0, builder_1.buildMessagesFromTemplate)(template, variables);
+         expect(messages[0].content).toBe('You are a assistant specialized in code review.');
+         expect(messages[1].content).toBe('Hello!');
+     });
+ });
@@ -0,0 +1,57 @@
+ /**
+  * Content preparation utilities for prompt engineering
+  *
+  * This module provides utilities that analyze, manipulate, and prepare raw text
+  * content before it's assembled into a final prompt structure. These functions
+  * help prepare the "ingredients" that will be used in prompt construction.
+  */
+ import { TiktokenModel } from 'js-tiktoken';
+ /**
+  * Counts the number of tokens in a text string using the specified model's tokenizer.
+  *
+  * @param text The text to count tokens for
+  * @param model The model whose tokenizer to use (defaults to 'gpt-4')
+  * @returns The number of tokens in the text
+  */
+ export declare function countTokens(text: string, model?: TiktokenModel): number;
+ /**
+  * Generates an intelligent preview of content that respects logical boundaries.
+  *
+  * This function truncates content while trying to break at natural points
+  * (empty lines) rather than in the middle of a section.
+  *
+  * @param content The content to preview
+  * @param config Configuration with minLines and maxLines
+  * @returns A truncated preview of the content
+  */
+ export declare function getSmartPreview(content: string, config: {
+     minLines: number;
+     maxLines: number;
+ }): string;
+ /**
+  * Extracts content from <RANDOM_...> tags into a flattened variable object.
+  * This is useful for preparing few-shot examples in prompts.
+  *
+  * @param content The string content containing the random tags.
+  * @param options Configuration options, like the max number of examples per tag.
+  * @returns A record of variables for use in a template engine.
+  *
+  * @example
+  * const content = `
+  * <RANDOM_GREETING>Hello</RANDOM_GREETING>
+  * <RANDOM_GREETING>Hi</RANDOM_GREETING>
+  * <RANDOM_FAREWELL>Goodbye</RANDOM_FAREWELL>
+  * `;
+  *
+  * const result = extractRandomVariables(content, { maxPerTag: 2 });
+  * // Might return:
+  * // {
+  * //   random_greeting_1: "Hi",
+  * //   random_greeting_2: "Hello",
+  * //   random_farewell_1: "Goodbye",
+  * //   random_farewell_2: ""
+  * // }
+  */
+ export declare function extractRandomVariables(content: string, options?: {
+     maxPerTag?: number;
+ }): Record<string, any>;
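Based on the `getSmartPreview` signature declared above (the README hunk earlier only shows the changed import line), a minimal call looks like the following sketch; the sample input and the line counts are arbitrary values chosen for illustration:

```typescript
import { getSmartPreview } from 'genai-lite/prompting';

// 200 numbered lines standing in for a large source file.
const longFile = Array.from({ length: 200 }, (_, i) => `line ${i + 1}`).join('\n');

// Keep at least 10 lines, prefer to stop at an empty line,
// and never exceed 30 lines (per the doc comment above).
const preview = getSmartPreview(longFile, { minLines: 10, maxLines: 30 });
console.log(preview.split('\n').length);
```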