@vertana/core 0.1.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/LICENSE +20 -0
  2. package/dist/_virtual/rolldown_runtime.cjs +29 -0
  3. package/dist/accumulator.cjs +64 -0
  4. package/dist/accumulator.d.cts +51 -0
  5. package/dist/accumulator.d.ts +51 -0
  6. package/dist/accumulator.js +61 -0
  7. package/dist/chunking.cjs +76 -0
  8. package/dist/chunking.d.cts +124 -0
  9. package/dist/chunking.d.ts +124 -0
  10. package/dist/chunking.js +74 -0
  11. package/dist/context.cjs +51 -0
  12. package/dist/context.d.cts +148 -0
  13. package/dist/context.d.ts +148 -0
  14. package/dist/context.js +49 -0
  15. package/dist/evaluation.cjs +120 -0
  16. package/dist/evaluation.d.cts +111 -0
  17. package/dist/evaluation.d.ts +111 -0
  18. package/dist/evaluation.js +119 -0
  19. package/dist/glossary.cjs +0 -0
  20. package/dist/glossary.d.cts +25 -0
  21. package/dist/glossary.d.ts +25 -0
  22. package/dist/glossary.js +0 -0
  23. package/dist/html.cjs +253 -0
  24. package/dist/html.d.cts +41 -0
  25. package/dist/html.d.ts +41 -0
  26. package/dist/html.js +250 -0
  27. package/dist/index.cjs +39 -0
  28. package/dist/index.d.cts +17 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.js +16 -0
  31. package/dist/markdown.cjs +300 -0
  32. package/dist/markdown.d.cts +17 -0
  33. package/dist/markdown.d.ts +17 -0
  34. package/dist/markdown.js +300 -0
  35. package/dist/plaintext.cjs +70 -0
  36. package/dist/plaintext.d.cts +17 -0
  37. package/dist/plaintext.d.ts +17 -0
  38. package/dist/plaintext.js +70 -0
  39. package/dist/prompt.cjs +91 -0
  40. package/dist/prompt.d.cts +74 -0
  41. package/dist/prompt.d.ts +74 -0
  42. package/dist/prompt.js +86 -0
  43. package/dist/refine.cjs +243 -0
  44. package/dist/refine.d.cts +148 -0
  45. package/dist/refine.d.ts +148 -0
  46. package/dist/refine.js +241 -0
  47. package/dist/select.cjs +62 -0
  48. package/dist/select.d.cts +83 -0
  49. package/dist/select.d.ts +83 -0
  50. package/dist/select.js +61 -0
  51. package/dist/terms.cjs +60 -0
  52. package/dist/terms.d.cts +36 -0
  53. package/dist/terms.d.ts +36 -0
  54. package/dist/terms.js +59 -0
  55. package/dist/tokens.cjs +40 -0
  56. package/dist/tokens.d.cts +24 -0
  57. package/dist/tokens.d.ts +24 -0
  58. package/dist/tokens.js +38 -0
  59. package/dist/tools.cjs +35 -0
  60. package/dist/tools.d.cts +20 -0
  61. package/dist/tools.d.ts +20 -0
  62. package/dist/tools.js +34 -0
  63. package/dist/translate.cjs +200 -0
  64. package/dist/translate.d.cts +190 -0
  65. package/dist/translate.d.ts +190 -0
  66. package/dist/translate.js +199 -0
  67. package/dist/window.cjs +0 -0
  68. package/dist/window.d.cts +48 -0
  69. package/dist/window.d.ts +48 -0
  70. package/dist/window.js +0 -0
  71. package/package.json +215 -0
@@ -0,0 +1,190 @@
1
+ import { Glossary, GlossaryEntry } from "./glossary.js";
2
+ import { MediaType, TranslationTone } from "./prompt.js";
3
+ import { LanguageModel, ToolSet } from "ai";
4
+
5
+ //#region src/translate.d.ts
6
+
7
/**
 * Options for dynamic glossary accumulation during chunk translation.
 * When enabled, terms extracted from each translated chunk are merged into
 * the glossary used for subsequent chunks.
 */
interface DynamicGlossaryOptions {
  /**
   * Maximum number of terms to extract from each chunk.
   *
   * @default `10`
   */
  readonly maxTermsPerChunk?: number;
  /**
   * The model to use for extracting terms.
   * If not specified, the primary translation model is used.
   */
  readonly extractorModel?: LanguageModel;
}
23
/**
 * Options for iterative refinement of translations.
 */
interface RefinementOptions {
  /**
   * The minimum acceptable quality score (0-1). Chunks with scores below
   * this threshold will be refined.
   *
   * @default `0.85`
   */
  readonly qualityThreshold?: number;
  /**
   * Maximum number of refinement iterations per chunk.
   *
   * @default `3`
   */
  readonly maxIterations?: number;
}
41
/**
 * Options for translating chunks.
 */
interface TranslateChunksOptions {
  /**
   * The target language for translation.
   */
  readonly targetLanguage: Intl.Locale | string;
  /**
   * The source language of the input text.
   */
  readonly sourceLanguage?: Intl.Locale | string;
  /**
   * An optional title for the input text. It's translated along with
   * the first chunk if provided.
   */
  readonly title?: string;
  /**
   * The desired tone for the translated text.
   */
  readonly tone?: TranslationTone;
  /**
   * The domain or context of the text.
   */
  readonly domain?: string;
  /**
   * The media type of the input text.
   */
  readonly mediaType?: MediaType;
  /**
   * Additional context for the translation.
   */
  readonly context?: string;
  /**
   * Initial glossary for consistent terminology.
   */
  readonly glossary?: Glossary;
  /**
   * The language models to use for translation. Must contain at least
   * one model. If multiple models are provided, best-of-N selection
   * is used.
   */
  readonly models: readonly LanguageModel[];
  /**
   * The model to use for evaluating and selecting the best translation.
   * If not specified, the first model in the array is used.
   */
  readonly evaluatorModel?: LanguageModel;
  /**
   * Dynamic glossary accumulation settings.
   * When enabled, terms are extracted from each translated chunk
   * and accumulated for use in subsequent chunks.
   */
  readonly dynamicGlossary?: DynamicGlossaryOptions | null;
  /**
   * Refinement settings for iterative translation improvement.
   * When enabled, chunks are evaluated and refined until they meet
   * the quality threshold or reach maximum iterations.
   */
  readonly refinement?: RefinementOptions | null;
  /**
   * Optional tools for passive context sources.
   */
  readonly tools?: ToolSet;
  /**
   * Optional abort signal.
   */
  readonly signal?: AbortSignal;
}
109
/**
 * Event yielded for each translated chunk.
 */
interface TranslatedChunkEvent {
  readonly type: "chunk";
  /**
   * The index of the chunk (0-based).
   */
  readonly index: number;
  /**
   * The translated text for this chunk.
   */
  readonly translation: string;
  /**
   * The number of tokens used for this chunk (summed across all models
   * when best-of-N selection is used).
   */
  readonly tokensUsed: number;
  /**
   * The quality score if best-of-N selection was used.
   */
  readonly qualityScore?: number;
  /**
   * The model that produced the best translation for this chunk.
   */
  readonly selectedModel?: LanguageModel;
  /**
   * Terms extracted from this chunk if dynamic glossary is enabled.
   */
  readonly extractedTerms?: readonly GlossaryEntry[];
}
139
/**
 * Event yielded when all chunks are translated.
 */
interface TranslateChunksComplete {
  readonly type: "complete";
  /**
   * All translated chunks in order.
   */
  readonly translations: readonly string[];
  /**
   * Total tokens used across all chunks.
   */
  readonly totalTokensUsed: number;
  /**
   * All accumulated glossary terms from dynamic extraction.
   * Empty when dynamic glossary accumulation is disabled.
   */
  readonly accumulatedGlossary: readonly GlossaryEntry[];
  /**
   * Average quality score across all chunks.
   * Only present if best-of-N selection or refinement was used.
   */
  readonly qualityScore?: number;
  /**
   * Total number of refinement iterations performed.
   * Only present if refinement was enabled.
   */
  readonly refinementIterations?: number;
}
167
/**
 * Events yielded during chunk translation: one {@link TranslatedChunkEvent}
 * per chunk, followed by a single terminal {@link TranslateChunksComplete}.
 */
type TranslateChunksEvent = TranslatedChunkEvent | TranslateChunksComplete;
171
/**
 * Translates source chunks using the provided models and options.
 *
 * This function returns an async iterable that yields events for each
 * translated chunk, allowing consumers to process chunks incrementally
 * and track progress.
 *
 * Features:
 * - Per-chunk parallel translation with multiple models (best-of-N selection)
 * - Previous chunk context passing for consistency
 * - Dynamic glossary accumulation across chunks
 * - Streaming results via AsyncIterable
 *
 * @param sourceChunks The source text chunks to translate.
 * @param options Translation options.
 * @returns An async iterable of translation events.
 */
declare function translateChunks(sourceChunks: readonly string[], options: TranslateChunksOptions): AsyncIterable<TranslateChunksEvent>;
189
+ //#endregion
190
+ export { DynamicGlossaryOptions, RefinementOptions, TranslateChunksComplete, TranslateChunksEvent, TranslateChunksOptions, TranslatedChunkEvent, translateChunks };
@@ -0,0 +1,199 @@
1
+ import { refineChunks } from "./refine.js";
2
+ import { selectBest } from "./select.js";
3
+ import { buildSystemPrompt, buildUserPrompt, buildUserPromptWithContext } from "./prompt.js";
4
+ import { extractTerms } from "./terms.js";
5
+ import { getLogger } from "@logtape/logtape";
6
+ import { generateText, stepCountIs } from "ai";
7
+
8
+ //#region src/translate.ts
9
// Module-level logger under the ["vertana", "core", "translate"] category.
const logger = getLogger([
  "vertana",
  "core",
  "translate"
]);
14
/**
 * Translates a single chunk of text with one model.
 *
 * Builds the user prompt from the chunk (including previously translated
 * chunks as context when available), runs a single `generateText` call,
 * and reports the translated text along with the token usage.
 *
 * @param model The language model to translate with.
 * @param systemPrompt The system prompt to use.
 * @param text The source chunk to translate.
 * @param previousChunks Previously translated chunks passed as context.
 * @param tools Optional tools for passive context sources.
 * @param hasPassiveSources Whether any passive context tools are present;
 *   when true, multi-step generation is capped at 10 steps.
 * @param signal Optional abort signal.
 * @param title Optional title translated along with the chunk.
 * @returns The translated text and the total tokens consumed.
 */
async function translateSingleChunk(model, systemPrompt, text, previousChunks, tools, hasPassiveSources, signal, title) {
  let userPrompt;
  if (previousChunks.length > 0) {
    userPrompt = buildUserPromptWithContext(text, previousChunks);
  } else {
    userPrompt = buildUserPrompt(text, title);
  }
  let stopWhen = void 0;
  if (hasPassiveSources) {
    stopWhen = stepCountIs(10);
  }
  const response = await generateText({
    model,
    system: systemPrompt,
    prompt: userPrompt,
    tools,
    stopWhen,
    abortSignal: signal
  });
  const tokenUsed = response.usage?.totalTokens ?? 0;
  return { text: response.text, tokenUsed };
}
31
/**
 * Translates source chunks using the provided models and options.
 *
 * This function returns an async iterable that yields events for each
 * translated chunk, allowing consumers to process chunks incrementally
 * and track progress.
 *
 * Features:
 * - Per-chunk parallel translation with multiple models (best-of-N selection)
 * - Previous chunk context passing for consistency
 * - Dynamic glossary accumulation across chunks
 * - Streaming results via AsyncIterable
 *
 * @param sourceChunks The source text chunks to translate.
 * @param options Translation options.
 * @returns An async iterable of translation events.
 * @throws {TypeError} If `options.models` is empty.
 */
async function* translateChunks(sourceChunks, options) {
  const { targetLanguage, sourceLanguage, title, tone, domain, mediaType, context, glossary: initialGlossary = [], models, evaluatorModel, dynamicGlossary, refinement, tools, signal } = options;
  if (models.length < 1) {
    // Fail fast with a clear error instead of crashing later on models[0].
    throw new TypeError("translateChunks() requires at least one model.");
  }
  const primaryModel = models[0];
  const useBestOfN = models.length > 1;
  const hasPassiveSources = tools != null && Object.keys(tools).length > 0;
  logger.info("Starting translation of {chunkCount} chunks with {modelCount} model(s)...", {
    chunkCount: sourceChunks.length,
    modelCount: models.length,
    targetLanguage: targetLanguage.toString(),
    useBestOfN,
    dynamicGlossary: dynamicGlossary != null,
    refinement: refinement != null
  });
  const baseSystemPromptOptions = {
    sourceLanguage,
    tone,
    domain,
    mediaType,
    context
  };
  // Terms collected so far via dynamic glossary extraction.
  const accumulatedGlossary = [];
  /**
   * Builds a system prompt reflecting the current glossary state
   * (initial glossary merged with dynamically accumulated terms).
   */
  function buildCurrentSystemPrompt() {
    const currentGlossary = accumulatedGlossary.length > 0 ? [...initialGlossary, ...accumulatedGlossary] : initialGlossary;
    return buildSystemPrompt(targetLanguage, {
      ...baseSystemPromptOptions,
      glossary: currentGlossary.length > 0 ? currentGlossary : void 0
    });
  }
  const translations = [];
  let totalTokensUsed = 0;
  // Per-chunk best-of-N scores; used to report an average quality score when
  // no refinement phase runs, matching the declared contract ("present if
  // best-of-N selection or refinement was used").
  const chunkScores = [];
  // Previously translated chunks, passed as context for consistency.
  const previousChunks = [];
  for (let i = 0; i < sourceChunks.length; i++) {
    signal?.throwIfAborted();
    logger.debug("Translating chunk {index} of {total}...", {
      index: i + 1,
      total: sourceChunks.length
    });
    // Rebuild the prompt each iteration when dynamic glossary is enabled so
    // newly accumulated terms influence subsequent chunks.
    const currentSystemPrompt = dynamicGlossary != null ? buildCurrentSystemPrompt() : buildSystemPrompt(targetLanguage, {
      ...baseSystemPromptOptions,
      glossary: initialGlossary.length > 0 ? initialGlossary : void 0
    });
    const currentGlossary = accumulatedGlossary.length > 0 ? [...initialGlossary, ...accumulatedGlossary] : initialGlossary;
    // The title is only translated along with the first chunk.
    const chunkTitle = i === 0 ? title : void 0;
    // Translate the chunk with every model in parallel.
    const chunkResults = await Promise.all(models.map(async (model) => {
      return {
        model,
        ...await translateSingleChunk(model, currentSystemPrompt, sourceChunks[i], previousChunks, tools, hasPassiveSources, signal, chunkTitle)
      };
    }));
    let chunkTokensUsed = 0;
    for (const result of chunkResults) chunkTokensUsed += result.tokenUsed;
    totalTokensUsed += chunkTokensUsed;
    let selectedTranslation;
    let selectedModel;
    let qualityScore;
    if (useBestOfN) {
      const candidates = chunkResults.map((r) => ({
        text: r.text,
        metadata: r.model
      }));
      const selectionResult = await selectBest(evaluatorModel ?? primaryModel, sourceChunks[i], candidates, {
        targetLanguage,
        sourceLanguage,
        glossary: currentGlossary.length > 0 ? currentGlossary : void 0,
        signal
      });
      selectedTranslation = selectionResult.best.text;
      selectedModel = selectionResult.best.metadata;
      qualityScore = selectionResult.best.score;
      if (qualityScore != null) chunkScores.push(qualityScore);
      logger.debug("Best-of-N selection for chunk {index}: score {score}.", {
        index: i + 1,
        score: qualityScore
      });
    } else {
      // Single model: no evaluation round trip needed.
      selectedTranslation = chunkResults[0].text;
    }
    translations.push(selectedTranslation);
    let extractedTerms;
    if (dynamicGlossary != null) {
      const extractorModel = dynamicGlossary.extractorModel ?? primaryModel;
      const maxTermsPerChunk = dynamicGlossary.maxTermsPerChunk ?? 10;
      extractedTerms = await extractTerms(extractorModel, sourceChunks[i], selectedTranslation, {
        maxTerms: maxTermsPerChunk,
        signal
      });
      let addedTerms = 0;
      for (const term of extractedTerms) {
        // Deduplicate case-insensitively against both the accumulated and
        // the initial glossary before accepting a new term.
        const isDuplicate = accumulatedGlossary.some((existing) => existing.original.toLowerCase() === term.original.toLowerCase()) || initialGlossary.some((existing) => existing.original.toLowerCase() === term.original.toLowerCase());
        if (!isDuplicate) {
          accumulatedGlossary.push(term);
          addedTerms++;
        }
      }
      logger.debug("Extracted {extracted} terms from chunk {index}, added {added} new terms.", {
        extracted: extractedTerms.length,
        index: i + 1,
        added: addedTerms,
        totalGlossary: accumulatedGlossary.length
      });
    }
    previousChunks.push({
      source: sourceChunks[i],
      translation: selectedTranslation
    });
    yield {
      type: "chunk",
      index: i,
      translation: selectedTranslation,
      tokensUsed: chunkTokensUsed,
      qualityScore,
      selectedModel,
      extractedTerms
    };
  }
  let finalTranslations = translations;
  let finalQualityScore;
  let refinementIterations;
  if (refinement != null) {
    logger.info("Starting refinement phase...");
    const refinementGlossary = accumulatedGlossary.length > 0 ? [...initialGlossary, ...accumulatedGlossary] : initialGlossary;
    const refineResult = await refineChunks(primaryModel, sourceChunks, translations, {
      targetLanguage,
      sourceLanguage,
      targetScore: refinement.qualityThreshold ?? .85,
      maxIterations: refinement.maxIterations ?? 3,
      glossary: refinementGlossary.length > 0 ? refinementGlossary : void 0,
      evaluateBoundaries: sourceChunks.length > 1,
      signal
    });
    finalTranslations = [...refineResult.chunks];
    finalQualityScore = refineResult.scores.reduce((a, b) => a + b, 0) / refineResult.scores.length;
    refinementIterations = refineResult.totalIterations;
    logger.info("Refinement completed after {iterations} iteration(s), average score: {score}.", {
      iterations: refinementIterations,
      score: finalQualityScore
    });
  } else if (chunkScores.length > 0) {
    // No refinement: fall back to the average best-of-N selection score so
    // the complete event's qualityScore matches its documented contract.
    finalQualityScore = chunkScores.reduce((a, b) => a + b, 0) / chunkScores.length;
  }
  logger.info("Translation completed.", {
    totalChunks: sourceChunks.length,
    totalTokensUsed,
    glossaryTerms: accumulatedGlossary.length
  });
  yield {
    type: "complete",
    translations: finalTranslations,
    totalTokensUsed,
    accumulatedGlossary,
    qualityScore: finalQualityScore,
    refinementIterations
  };
}
197
+
198
+ //#endregion
199
+ export { translateChunks };
File without changes
@@ -0,0 +1,48 @@
1
//#region src/window.d.ts
/**
 * Strategy for managing context window limits.
 *
 * - {@link ExplicitContextWindow}: User explicitly specifies the maximum
 *   token count.
 * - {@link AdaptiveContextWindow}: Dynamically detects limits by retrying
 *   with smaller chunks when token limit errors occur.
 */
type ContextWindow = ExplicitContextWindow | AdaptiveContextWindow;
/**
 * User explicitly specifies the maximum token count.
 */
interface ExplicitContextWindow {
  /**
   * Indicates that the token limit is explicitly specified.
   */
  readonly type: "explicit";
  /**
   * The maximum number of tokens allowed in the context window.
   */
  readonly maxTokens: number;
}
/**
 * Dynamically detects limits by retrying with smaller chunks when token
 * limit errors occur.
 */
interface AdaptiveContextWindow {
  /**
   * Indicates that the token limit is dynamically detected.
   */
  readonly type: "adaptive";
  /**
   * The initial token count to try before adapting.
   *
   * @default `16384`
   */
  readonly initialMaxTokens?: number;
  /**
   * The minimum token count before giving up. If the chunk size falls below
   * this threshold, an error is thrown instead of retrying.
   *
   * @default `1024`
   */
  readonly minTokens?: number;
}
//#endregion
export { AdaptiveContextWindow, ContextWindow, ExplicitContextWindow };
@@ -0,0 +1,48 @@
1
//#region src/window.d.ts
/**
 * Strategy for managing context window limits.
 *
 * - {@link ExplicitContextWindow}: User explicitly specifies the maximum
 *   token count.
 * - {@link AdaptiveContextWindow}: Dynamically detects limits by retrying
 *   with smaller chunks when token limit errors occur.
 */
type ContextWindow = ExplicitContextWindow | AdaptiveContextWindow;
/**
 * User explicitly specifies the maximum token count.
 */
interface ExplicitContextWindow {
  /**
   * Indicates that the token limit is explicitly specified.
   */
  readonly type: "explicit";
  /**
   * The maximum number of tokens allowed in the context window.
   */
  readonly maxTokens: number;
}
/**
 * Dynamically detects limits by retrying with smaller chunks when token
 * limit errors occur.
 */
interface AdaptiveContextWindow {
  /**
   * Indicates that the token limit is dynamically detected.
   */
  readonly type: "adaptive";
  /**
   * The initial token count to try before adapting.
   *
   * @default `16384`
   */
  readonly initialMaxTokens?: number;
  /**
   * The minimum token count before giving up. If the chunk size falls below
   * this threshold, an error is thrown instead of retrying.
   *
   * @default `1024`
   */
  readonly minTokens?: number;
}
//#endregion
export { AdaptiveContextWindow, ContextWindow, ExplicitContextWindow };
package/dist/window.js ADDED
File without changes
package/package.json ADDED
@@ -0,0 +1,215 @@
1
+ {
2
+ "name": "@vertana/core",
3
+ "version": "0.1.0-dev.1",
4
+ "description": "The core library for Vertana, an LLM-powered natural language translation library",
5
+ "keywords": [
6
+ "LLM",
7
+ "translation",
8
+ "natural language processing",
9
+ "NLP",
10
+ "language model"
11
+ ],
12
+ "license": "MIT",
13
+ "author": {
14
+ "name": "Hong Minhee",
15
+ "email": "hong@minhee.org",
16
+ "url": "https://hongminhee.org/"
17
+ },
18
+ "homepage": "https://vertana.org/",
19
+ "repository": {
20
+ "type": "git",
21
+ "url": "git+https://github.com/dahlia/vertana.git",
22
+ "directory": "packages/core"
23
+ },
24
+ "bugs": {
25
+ "url": "https://github.com/dahlia/vertana/issues"
26
+ },
27
+ "funding": [
28
+ "https://github.com/sponsors/dahlia"
29
+ ],
30
+ "engines": {
31
+ "node": ">=20.0.0",
32
+ "bun": ">=1.2.0",
33
+ "deno": ">=2.3.0"
34
+ },
35
+ "files": [
36
+ "dist/",
37
+ "package.json",
38
+ "README.md"
39
+ ],
40
+ "type": "module",
41
+ "module": "./dist/index.js",
42
+ "main": "./dist/index.cjs",
43
+ "types": "./dist/index.d.ts",
44
+ "exports": {
45
+ ".": {
46
+ "types": {
47
+ "require": "./dist/index.d.cts",
48
+ "import": "./dist/index.d.ts"
49
+ },
50
+ "require": "./dist/index.cjs",
51
+ "import": "./dist/index.js"
52
+ },
53
+ "./accumulator": {
54
+ "types": {
55
+ "require": "./dist/accumulator.d.cts",
56
+ "import": "./dist/accumulator.d.ts"
57
+ },
58
+ "require": "./dist/accumulator.cjs",
59
+ "import": "./dist/accumulator.js"
60
+ },
61
+ "./chunking": {
62
+ "types": {
63
+ "require": "./dist/chunking.d.cts",
64
+ "import": "./dist/chunking.d.ts"
65
+ },
66
+ "require": "./dist/chunking.cjs",
67
+ "import": "./dist/chunking.js"
68
+ },
69
+ "./context": {
70
+ "types": {
71
+ "require": "./dist/context.d.cts",
72
+ "import": "./dist/context.d.ts"
73
+ },
74
+ "require": "./dist/context.cjs",
75
+ "import": "./dist/context.js"
76
+ },
77
+ "./evaluation": {
78
+ "types": {
79
+ "require": "./dist/evaluation.d.cts",
80
+ "import": "./dist/evaluation.d.ts"
81
+ },
82
+ "require": "./dist/evaluation.cjs",
83
+ "import": "./dist/evaluation.js"
84
+ },
85
+ "./glossary": {
86
+ "types": {
87
+ "require": "./dist/glossary.d.cts",
88
+ "import": "./dist/glossary.d.ts"
89
+ },
90
+ "require": "./dist/glossary.cjs",
91
+ "import": "./dist/glossary.js"
92
+ },
93
+ "./html": {
94
+ "types": {
95
+ "require": "./dist/html.d.cts",
96
+ "import": "./dist/html.d.ts"
97
+ },
98
+ "require": "./dist/html.cjs",
99
+ "import": "./dist/html.js"
100
+ },
101
+ "./markdown": {
102
+ "types": {
103
+ "require": "./dist/markdown.d.cts",
104
+ "import": "./dist/markdown.d.ts"
105
+ },
106
+ "require": "./dist/markdown.cjs",
107
+ "import": "./dist/markdown.js"
108
+ },
109
+ "./plaintext": {
110
+ "types": {
111
+ "require": "./dist/plaintext.d.cts",
112
+ "import": "./dist/plaintext.d.ts"
113
+ },
114
+ "require": "./dist/plaintext.cjs",
115
+ "import": "./dist/plaintext.js"
116
+ },
117
+ "./refine": {
118
+ "types": {
119
+ "require": "./dist/refine.d.cts",
120
+ "import": "./dist/refine.d.ts"
121
+ },
122
+ "require": "./dist/refine.cjs",
123
+ "import": "./dist/refine.js"
124
+ },
125
+ "./select": {
126
+ "types": {
127
+ "require": "./dist/select.d.cts",
128
+ "import": "./dist/select.d.ts"
129
+ },
130
+ "require": "./dist/select.cjs",
131
+ "import": "./dist/select.js"
132
+ },
133
+ "./tokens": {
134
+ "types": {
135
+ "require": "./dist/tokens.d.cts",
136
+ "import": "./dist/tokens.d.ts"
137
+ },
138
+ "require": "./dist/tokens.cjs",
139
+ "import": "./dist/tokens.js"
140
+ },
141
+ "./window": {
142
+ "types": {
143
+ "require": "./dist/window.d.cts",
144
+ "import": "./dist/window.d.ts"
145
+ },
146
+ "require": "./dist/window.cjs",
147
+ "import": "./dist/window.js"
148
+ },
149
+ "./prompt": {
150
+ "types": {
151
+ "require": "./dist/prompt.d.cts",
152
+ "import": "./dist/prompt.d.ts"
153
+ },
154
+ "require": "./dist/prompt.cjs",
155
+ "import": "./dist/prompt.js"
156
+ },
157
+ "./terms": {
158
+ "types": {
159
+ "require": "./dist/terms.d.cts",
160
+ "import": "./dist/terms.d.ts"
161
+ },
162
+ "require": "./dist/terms.cjs",
163
+ "import": "./dist/terms.js"
164
+ },
165
+ "./tools": {
166
+ "types": {
167
+ "require": "./dist/tools.d.cts",
168
+ "import": "./dist/tools.d.ts"
169
+ },
170
+ "require": "./dist/tools.cjs",
171
+ "import": "./dist/tools.js"
172
+ },
173
+ "./translate": {
174
+ "types": {
175
+ "require": "./dist/translate.d.cts",
176
+ "import": "./dist/translate.d.ts"
177
+ },
178
+ "require": "./dist/translate.cjs",
179
+ "import": "./dist/translate.js"
180
+ }
181
+ },
182
+ "sideEffects": false,
183
+ "dependencies": {
184
+ "@logtape/logtape": "^1.3.5",
185
+ "@standard-community/standard-json": "^0.3.5",
186
+ "dom-serializer": "^2.0.0",
187
+ "domhandler": "^5.0.3",
188
+ "htmlparser2": "^10.0.0",
189
+ "js-tiktoken": "^1.0.21",
190
+ "zod": "4.2.1"
191
+ },
192
+ "peerDependencies": {
193
+ "@standard-schema/spec": "^1.0.0",
194
+ "ai": "6.0.3"
195
+ },
196
+ "devDependencies": {
197
+ "@ai-sdk/anthropic": "3.0.1",
198
+ "@ai-sdk/google": "3.0.1",
199
+ "@ai-sdk/openai": "3.0.1",
200
+ "@standard-schema/spec": "^1.0.0",
201
+ "@types/node": "^20.19.9",
202
+ "ai": "6.0.3",
203
+ "tsdown": "^0.18.3",
204
+ "typescript": "^5.9.3",
205
+ "valibot": "1.2.0"
206
+ },
207
+ "scripts": {
208
+ "build": "tsdown",
209
+ "prepublish": "tsdown",
210
+ "test": "tsdown && node --experimental-transform-types --test --test-concurrency=4",
211
+ "test:bun": "tsdown && bun test",
212
+ "test:deno": "deno test --allow-env",
213
+ "test-all": "tsdown && node --experimental-transform-types --test && bun test && deno test"
214
+ }
215
+ }