@vertana/facade 0.1.0-dev.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +20 -0
- package/dist/index.cjs +157 -0
- package/dist/index.d.cts +24 -0
- package/dist/index.d.ts +24 -0
- package/dist/index.js +151 -0
- package/dist/result.cjs +32 -0
- package/dist/result.js +32 -0
- package/dist/types.d.cts +282 -0
- package/dist/types.d.ts +282 -0
- package/package.json +82 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright 2025 Hong Minhee
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
6
|
+
this software and associated documentation files (the "Software"), to deal in
|
|
7
|
+
the Software without restriction, including without limitation the rights to
|
|
8
|
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|
9
|
+
the Software, and to permit persons to whom the Software is furnished to do so,
|
|
10
|
+
subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|
17
|
+
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|
18
|
+
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|
19
|
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
20
|
+
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
const require_result = require('./result.cjs');
|
|
2
|
+
let _logtape_logtape = require("@logtape/logtape");
|
|
3
|
+
let _vertana_core = require("@vertana/core");
|
|
4
|
+
|
|
5
|
+
//#region src/index.ts
|
|
6
|
+
const logger = (0, _logtape_logtape.getLogger)([
|
|
7
|
+
"vertana",
|
|
8
|
+
"facade",
|
|
9
|
+
"translate"
|
|
10
|
+
]);
|
|
11
|
+
/**
|
|
12
|
+
* Translates the given text to the specified target language using the provided
|
|
13
|
+
* language model(s).
|
|
14
|
+
*
|
|
15
|
+
* @param model The language model or models to use for translation.
|
|
16
|
+
* If multiple models are provided, they will be used for
|
|
17
|
+
* best-of-N selection.
|
|
18
|
+
* @param targetLanguage The target language for the translation. This can be
|
|
19
|
+
* specified as an `Intl.Locale` object or a BCP 47
|
|
20
|
+
* language tag string.
|
|
21
|
+
* @param text The text to be translated.
|
|
22
|
+
* @param options Optional settings for the translation process.
|
|
23
|
+
* @returns A promise that resolves to the translation result.
|
|
24
|
+
* @throws {Error} If the translation stream ends without a completion event.
|
|
25
|
+
*/
|
|
26
|
+
async function translate(model, targetLanguage, text, options) {
  const startTime = performance.now();
  logger.info("Starting translation...", {
    targetLanguage: targetLanguage.toString(),
    textLength: text.length
  });
  // Normalize the single-model overload to an array; the first entry is the
  // model used when best-of-N selection is not in effect.
  const models = Array.isArray(model) ? model : [model];
  const primaryModel = models[0];
  // Best-of-N only applies when more than one model was supplied. `true`
  // selects default settings ({}), an object carries custom settings, and
  // `false`/`null`/`undefined` disable it (null here).
  const bestOfNOptions = models.length > 1 && options?.bestOfN != null && options.bestOfN !== false ? options.bestOfN === true ? {} : options.bestOfN : null;
  // Same true/object/false normalization for dynamic glossary accumulation.
  const dynamicGlossaryOptions = options?.dynamicGlossary != null && options.dynamicGlossary !== false ? options.dynamicGlossary === true ? {} : options.dynamicGlossary : null;
  let gatheredContext = "";
  if (options?.contextSources != null && options.contextSources.length > 0) {
    options?.onProgress?.({
      stage: "gatheringContext",
      progress: 0
    });
    gatheredContext = (0, _vertana_core.combineContextResults)(await (0, _vertana_core.gatherRequiredContext)(options.contextSources, options?.signal));
    options?.onProgress?.({
      stage: "gatheringContext",
      progress: 1
    });
    logger.debug("Context gathering completed.", { contextLength: gatheredContext.length });
  }
  // Merge caller-supplied context with gathered context, dropping blank
  // pieces; pieces are separated by a blank line.
  const combinedContext = [options?.context, gatheredContext].filter((c) => c != null && c.trim().length > 0).join("\n\n");
  // Chunking is on unless the caller explicitly passed `chunker: null`
  // (undefined means "use the default chunker").
  const chunkingEnabled = options?.chunker !== null;
  if (chunkingEnabled) options?.onProgress?.({
    stage: "chunking",
    progress: 0
  });
  // NOTE(review): falls back to 4096 tokens here, while the TranslateOptions
  // JSDoc documents a default of `{ type: "explicit", maxTokens: 8192 }` —
  // confirm which default is intended.
  const maxTokens = options?.contextWindow?.type === "explicit" ? options.contextWindow.maxTokens : 4096;
  const sourceChunks = await (0, _vertana_core.chunkText)(text, {
    chunker: options?.chunker,
    mediaType: options?.mediaType,
    maxTokens,
    signal: options?.signal
  });
  if (chunkingEnabled) {
    options?.onProgress?.({
      stage: "chunking",
      progress: 1
    });
    logger.debug("Chunking completed.", { chunkCount: sourceChunks.length });
  }
  // Passive context sources are exposed to the model as tools rather than
  // being invoked up front like required sources.
  const passiveSources = (options?.contextSources ?? []).filter((s) => s.mode === "passive");
  let tools;
  if (passiveSources.length > 0) {
    options?.onProgress?.({
      stage: "prompting",
      progress: 0
    });
    tools = await (0, _vertana_core.createToolSet)(passiveSources, options?.signal);
    options?.onProgress?.({
      stage: "prompting",
      progress: 1
    });
  }
  const initialGlossary = options?.glossary ?? [];
  const totalChunks = sourceChunks.length;
  const modelsToUse = bestOfNOptions != null ? models : [primaryModel];
  options?.onProgress?.({
    stage: "translating",
    progress: 0,
    chunkIndex: 0,
    totalChunks
  });
  const refinementOptions = options?.refinement != null && options.refinement !== false ? options.refinement === true ? {} : options.refinement : null;
  // Consume the translation event stream, folding every event into the
  // accumulator and emitting progress per completed chunk.
  let state = (0, _vertana_core.createInitialAccumulatorState)();
  for await (const event of (0, _vertana_core.translateChunks)(sourceChunks, {
    targetLanguage,
    sourceLanguage: options?.sourceLanguage,
    title: options?.title,
    tone: options?.tone,
    domain: options?.domain,
    mediaType: options?.mediaType,
    context: combinedContext || void 0,
    glossary: initialGlossary,
    models: modelsToUse,
    evaluatorModel: bestOfNOptions?.evaluatorModel,
    dynamicGlossary: dynamicGlossaryOptions,
    refinement: refinementOptions,
    tools,
    signal: options?.signal
  })) {
    state = (0, _vertana_core.accumulateEvent)(state, event);
    if (event.type === "chunk") {
      options?.onProgress?.({
        stage: "translating",
        progress: (event.index + 1) / totalChunks,
        chunkIndex: event.index,
        totalChunks
      });
      if (event.selectedModel != null) options?.onProgress?.({
        stage: "selecting",
        progress: 1,
        totalCandidates: modelsToUse.length
      });
      // After the final chunk, refinement (when enabled) is about to start.
      if (event.index === totalChunks - 1 && refinementOptions != null) options?.onProgress?.({
        stage: "refining",
        progress: 0,
        maxIterations: refinementOptions.maxIterations ?? 3,
        totalChunks
      });
    }
  }
  if (state.complete?.refinementIterations != null) options?.onProgress?.({
    stage: "refining",
    progress: 1,
    iteration: state.complete.refinementIterations,
    maxIterations: refinementOptions?.maxIterations ?? 3,
    totalChunks
  });
  // buildTranslation throws if the stream ended without a completion event.
  const result = require_result.buildTranslation(state, {
    startTime,
    extractTitle: options?.title != null
  });
  logger.info("Translation completed.", {
    processingTimeMs: result.processingTime,
    // NOTE(review): the result property is named `tokenUsed` (singular) while
    // the log field is `tokensUsed` — intentional mapping, per the type.
    tokensUsed: result.tokenUsed,
    qualityScore: result.qualityScore,
    chunkCount: state.complete?.translations.length ?? 0
  });
  return result;
}

//#endregion
// Lazy CJS re-export of `extractTerms` from @vertana/core.
Object.defineProperty(exports, 'extractTerms', {
  enumerable: true,
  get: function () {
    return _vertana_core.extractTerms;
  }
});
exports.translate = translate;
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { BestOfNOptions, ChunkingProgress, DynamicGlossaryOptions, GatheringContextProgress, MediaType, PromptingProgress, RefinementOptions, RefiningProgress, SelectingProgress, TranslateOptions, TranslatingProgress, Translation, TranslationProgress, TranslationTone } from "./types.cjs";
|
|
2
|
+
import { LanguageModel } from "ai";
|
|
3
|
+
import { extractTerms } from "@vertana/core";
|
|
4
|
+
|
|
5
|
+
//#region src/index.d.ts
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Translates the given text to the specified target language using the provided
|
|
9
|
+
* language model(s).
|
|
10
|
+
*
|
|
11
|
+
* @param model The language model or models to use for translation.
|
|
12
|
+
* If multiple models are provided, they will be used for
|
|
13
|
+
* best-of-N selection.
|
|
14
|
+
* @param targetLanguage The target language for the translation. This can be
|
|
15
|
+
* specified as an `Intl.Locale` object or a BCP 47
|
|
16
|
+
* language tag string.
|
|
17
|
+
* @param text The text to be translated.
|
|
18
|
+
* @param options Optional settings for the translation process.
|
|
19
|
+
* @returns A promise that resolves to the translation result.
|
|
20
|
+
* @throws {Error} If the translation stream ends without a completion event.
|
|
21
|
+
*/
|
|
22
|
+
declare function translate(model: LanguageModel | readonly LanguageModel[], targetLanguage: Intl.Locale | string, text: string, options?: TranslateOptions): Promise<Translation>;
|
|
23
|
+
//#endregion
|
|
24
|
+
export { type BestOfNOptions, type ChunkingProgress, type DynamicGlossaryOptions, type GatheringContextProgress, type MediaType, type PromptingProgress, type RefinementOptions, type RefiningProgress, type SelectingProgress, type TranslateOptions, type TranslatingProgress, type Translation, type TranslationProgress, type TranslationTone, extractTerms, translate };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { BestOfNOptions, ChunkingProgress, DynamicGlossaryOptions, GatheringContextProgress, MediaType, PromptingProgress, RefinementOptions, RefiningProgress, SelectingProgress, TranslateOptions, TranslatingProgress, Translation, TranslationProgress, TranslationTone } from "./types.js";
|
|
2
|
+
import { extractTerms } from "@vertana/core";
|
|
3
|
+
import { LanguageModel } from "ai";
|
|
4
|
+
|
|
5
|
+
//#region src/index.d.ts
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Translates the given text to the specified target language using the provided
|
|
9
|
+
* language model(s).
|
|
10
|
+
*
|
|
11
|
+
* @param model The language model or models to use for translation.
|
|
12
|
+
* If multiple models are provided, they will be used for
|
|
13
|
+
* best-of-N selection.
|
|
14
|
+
* @param targetLanguage The target language for the translation. This can be
|
|
15
|
+
* specified as an `Intl.Locale` object or a BCP 47
|
|
16
|
+
* language tag string.
|
|
17
|
+
* @param text The text to be translated.
|
|
18
|
+
* @param options Optional settings for the translation process.
|
|
19
|
+
* @returns A promise that resolves to the translation result.
|
|
20
|
+
* @throws {Error} If the translation stream ends without a completion event.
|
|
21
|
+
*/
|
|
22
|
+
declare function translate(model: LanguageModel | readonly LanguageModel[], targetLanguage: Intl.Locale | string, text: string, options?: TranslateOptions): Promise<Translation>;
|
|
23
|
+
//#endregion
|
|
24
|
+
export { type BestOfNOptions, type ChunkingProgress, type DynamicGlossaryOptions, type GatheringContextProgress, type MediaType, type PromptingProgress, type RefinementOptions, type RefiningProgress, type SelectingProgress, type TranslateOptions, type TranslatingProgress, type Translation, type TranslationProgress, type TranslationTone, extractTerms, translate };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
import { buildTranslation } from "./result.js";
|
|
2
|
+
import { getLogger } from "@logtape/logtape";
|
|
3
|
+
import { accumulateEvent, chunkText, combineContextResults, createInitialAccumulatorState, createToolSet, extractTerms, gatherRequiredContext, translateChunks } from "@vertana/core";
|
|
4
|
+
|
|
5
|
+
//#region src/index.ts
|
|
6
|
+
const logger = getLogger([
|
|
7
|
+
"vertana",
|
|
8
|
+
"facade",
|
|
9
|
+
"translate"
|
|
10
|
+
]);
|
|
11
|
+
/**
|
|
12
|
+
* Translates the given text to the specified target language using the provided
|
|
13
|
+
* language model(s).
|
|
14
|
+
*
|
|
15
|
+
* @param model The language model or models to use for translation.
|
|
16
|
+
* If multiple models are provided, they will be used for
|
|
17
|
+
* best-of-N selection.
|
|
18
|
+
* @param targetLanguage The target language for the translation. This can be
|
|
19
|
+
* specified as an `Intl.Locale` object or a BCP 47
|
|
20
|
+
* language tag string.
|
|
21
|
+
* @param text The text to be translated.
|
|
22
|
+
* @param options Optional settings for the translation process.
|
|
23
|
+
* @returns A promise that resolves to the translation result.
|
|
24
|
+
* @throws {Error} If the translation stream ends without a completion event.
|
|
25
|
+
*/
|
|
26
|
+
async function translate(model, targetLanguage, text, options) {
  const startTime = performance.now();
  logger.info("Starting translation...", {
    targetLanguage: targetLanguage.toString(),
    textLength: text.length
  });
  // Normalize the single-model overload to an array; the first entry is the
  // model used when best-of-N selection is not in effect.
  const models = Array.isArray(model) ? model : [model];
  const primaryModel = models[0];
  // Best-of-N only applies when more than one model was supplied. `true`
  // selects default settings ({}), an object carries custom settings, and
  // `false`/`null`/`undefined` disable it (null here).
  const bestOfNOptions = models.length > 1 && options?.bestOfN != null && options.bestOfN !== false ? options.bestOfN === true ? {} : options.bestOfN : null;
  // Same true/object/false normalization for dynamic glossary accumulation.
  const dynamicGlossaryOptions = options?.dynamicGlossary != null && options.dynamicGlossary !== false ? options.dynamicGlossary === true ? {} : options.dynamicGlossary : null;
  let gatheredContext = "";
  if (options?.contextSources != null && options.contextSources.length > 0) {
    options?.onProgress?.({
      stage: "gatheringContext",
      progress: 0
    });
    gatheredContext = combineContextResults(await gatherRequiredContext(options.contextSources, options?.signal));
    options?.onProgress?.({
      stage: "gatheringContext",
      progress: 1
    });
    logger.debug("Context gathering completed.", { contextLength: gatheredContext.length });
  }
  // Merge caller-supplied context with gathered context, dropping blank
  // pieces; pieces are separated by a blank line.
  const combinedContext = [options?.context, gatheredContext].filter((c) => c != null && c.trim().length > 0).join("\n\n");
  // Chunking is on unless the caller explicitly passed `chunker: null`
  // (undefined means "use the default chunker").
  const chunkingEnabled = options?.chunker !== null;
  if (chunkingEnabled) options?.onProgress?.({
    stage: "chunking",
    progress: 0
  });
  // NOTE(review): falls back to 4096 tokens here, while the TranslateOptions
  // JSDoc documents a default of `{ type: "explicit", maxTokens: 8192 }` —
  // confirm which default is intended.
  const maxTokens = options?.contextWindow?.type === "explicit" ? options.contextWindow.maxTokens : 4096;
  const sourceChunks = await chunkText(text, {
    chunker: options?.chunker,
    mediaType: options?.mediaType,
    maxTokens,
    signal: options?.signal
  });
  if (chunkingEnabled) {
    options?.onProgress?.({
      stage: "chunking",
      progress: 1
    });
    logger.debug("Chunking completed.", { chunkCount: sourceChunks.length });
  }
  // Passive context sources are exposed to the model as tools rather than
  // being invoked up front like required sources.
  const passiveSources = (options?.contextSources ?? []).filter((s) => s.mode === "passive");
  let tools;
  if (passiveSources.length > 0) {
    options?.onProgress?.({
      stage: "prompting",
      progress: 0
    });
    tools = await createToolSet(passiveSources, options?.signal);
    options?.onProgress?.({
      stage: "prompting",
      progress: 1
    });
  }
  const initialGlossary = options?.glossary ?? [];
  const totalChunks = sourceChunks.length;
  const modelsToUse = bestOfNOptions != null ? models : [primaryModel];
  options?.onProgress?.({
    stage: "translating",
    progress: 0,
    chunkIndex: 0,
    totalChunks
  });
  const refinementOptions = options?.refinement != null && options.refinement !== false ? options.refinement === true ? {} : options.refinement : null;
  // Consume the translation event stream, folding every event into the
  // accumulator and emitting progress per completed chunk.
  let state = createInitialAccumulatorState();
  for await (const event of translateChunks(sourceChunks, {
    targetLanguage,
    sourceLanguage: options?.sourceLanguage,
    title: options?.title,
    tone: options?.tone,
    domain: options?.domain,
    mediaType: options?.mediaType,
    context: combinedContext || void 0,
    glossary: initialGlossary,
    models: modelsToUse,
    evaluatorModel: bestOfNOptions?.evaluatorModel,
    dynamicGlossary: dynamicGlossaryOptions,
    refinement: refinementOptions,
    tools,
    signal: options?.signal
  })) {
    state = accumulateEvent(state, event);
    if (event.type === "chunk") {
      options?.onProgress?.({
        stage: "translating",
        progress: (event.index + 1) / totalChunks,
        chunkIndex: event.index,
        totalChunks
      });
      if (event.selectedModel != null) options?.onProgress?.({
        stage: "selecting",
        progress: 1,
        totalCandidates: modelsToUse.length
      });
      // After the final chunk, refinement (when enabled) is about to start.
      if (event.index === totalChunks - 1 && refinementOptions != null) options?.onProgress?.({
        stage: "refining",
        progress: 0,
        maxIterations: refinementOptions.maxIterations ?? 3,
        totalChunks
      });
    }
  }
  if (state.complete?.refinementIterations != null) options?.onProgress?.({
    stage: "refining",
    progress: 1,
    iteration: state.complete.refinementIterations,
    maxIterations: refinementOptions?.maxIterations ?? 3,
    totalChunks
  });
  // buildTranslation throws if the stream ended without a completion event.
  const result = buildTranslation(state, {
    startTime,
    extractTitle: options?.title != null
  });
  logger.info("Translation completed.", {
    processingTimeMs: result.processingTime,
    // NOTE(review): the result property is named `tokenUsed` (singular) while
    // the log field is `tokensUsed` — intentional mapping, per the type.
    tokensUsed: result.tokenUsed,
    qualityScore: result.qualityScore,
    chunkCount: state.complete?.translations.length ?? 0
  });
  return result;
}

//#endregion
export { extractTerms, translate };
|
package/dist/result.cjs
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
let _vertana_core = require("@vertana/core");
|
|
2
|
+
|
|
3
|
+
//#region src/result.ts
|
|
4
|
+
/**
|
|
5
|
+
* Builds the final translation result from accumulated stream state.
|
|
6
|
+
*
|
|
7
|
+
* @param state The accumulated state from processing the translation stream.
|
|
8
|
+
* @param options Options for building the result.
|
|
9
|
+
* @returns The translation result.
|
|
10
|
+
* @throws {Error} If the translation stream did not complete.
|
|
11
|
+
*/
|
|
12
|
+
function buildTranslation(state, options) {
  const { complete, totalQualityScore, qualityScoreCount, modelWinCounts } = state;
  // A missing completion record means the stream ended prematurely.
  if (complete == null) throw new Error("Translation did not complete.");
  // Chunk translations are joined with a blank line between them.
  const text = complete.translations.join("\n\n");
  const processingTime = performance.now() - options.startTime;
  // Prefer the final (post-refinement) score; otherwise average the
  // per-chunk scores, or leave undefined when none were recorded.
  const qualityScore = complete.qualityScore ?? (qualityScoreCount > 0 ? totalQualityScore / qualityScoreCount : void 0);
  // Presumably the model with the highest best-of-N win count — confirm
  // against @vertana/core's maxByValue contract.
  const selectedModel = (0, _vertana_core.maxByValue)(modelWinCounts);
  return {
    text,
    // Title extraction is only requested when the caller supplied a title.
    title: options.extractTitle ? (0, _vertana_core.extractTitle)(text) : void 0,
    tokenUsed: complete.totalTokensUsed,
    processingTime,
    qualityScore,
    refinementIterations: complete.refinementIterations,
    selectedModel,
    // Omitted (undefined) when dynamic glossary extraction produced nothing.
    accumulatedGlossary: complete.accumulatedGlossary.length > 0 ? complete.accumulatedGlossary : void 0
  };
}

//#endregion
exports.buildTranslation = buildTranslation;
|
package/dist/result.js
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import { extractTitle, maxByValue } from "@vertana/core";
|
|
2
|
+
|
|
3
|
+
//#region src/result.ts
|
|
4
|
+
/**
|
|
5
|
+
* Builds the final translation result from accumulated stream state.
|
|
6
|
+
*
|
|
7
|
+
* @param state The accumulated state from processing the translation stream.
|
|
8
|
+
* @param options Options for building the result.
|
|
9
|
+
* @returns The translation result.
|
|
10
|
+
* @throws {Error} If the translation stream did not complete.
|
|
11
|
+
*/
|
|
12
|
+
function buildTranslation(state, options) {
  // The stream must have produced a completion record; otherwise bail out.
  if (state.complete == null) {
    throw new Error("Translation did not complete.");
  }
  const complete = state.complete;
  // Chunk translations are joined with a blank line between them.
  const text = complete.translations.join("\n\n");
  const processingTime = performance.now() - options.startTime;
  // Prefer the final (post-refinement) score; otherwise average the
  // per-chunk scores, or leave undefined when none were recorded.
  let qualityScore = complete.qualityScore;
  if (qualityScore == null) {
    qualityScore = state.qualityScoreCount > 0
      ? state.totalQualityScore / state.qualityScoreCount
      : undefined;
  }
  const result = {
    text,
    // Title extraction is only requested when the caller supplied a title.
    title: undefined,
    tokenUsed: complete.totalTokensUsed,
    processingTime,
    qualityScore,
    refinementIterations: complete.refinementIterations,
    selectedModel: maxByValue(state.modelWinCounts),
    // Omitted (undefined) when dynamic glossary extraction produced nothing.
    accumulatedGlossary: undefined
  };
  if (options.extractTitle) {
    result.title = extractTitle(text);
  }
  if (complete.accumulatedGlossary.length > 0) {
    result.accumulatedGlossary = complete.accumulatedGlossary;
  }
  return result;
}

//#endregion
export { buildTranslation };
|
package/dist/types.d.cts
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
import { LanguageModel } from "ai";
|
|
2
|
+
import { Chunker, ContextSource, ContextWindow, Glossary, MediaType, MediaType as MediaType$1, TranslationTone, TranslationTone as TranslationTone$1 } from "@vertana/core";
|
|
3
|
+
|
|
4
|
+
//#region src/types.d.ts
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Base progress information shared by all stages.
|
|
8
|
+
*/
|
|
9
|
+
interface BaseProgress {
|
|
10
|
+
/**
|
|
11
|
+
* The progress percentage (0 to 1) of the current stage.
|
|
12
|
+
*/
|
|
13
|
+
readonly progress: number;
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Progress information for the chunking stage.
|
|
17
|
+
*/
|
|
18
|
+
interface ChunkingProgress extends BaseProgress {
|
|
19
|
+
readonly stage: "chunking";
|
|
20
|
+
}
|
|
21
|
+
/**
|
|
22
|
+
* Progress information for the prompting stage.
|
|
23
|
+
*/
|
|
24
|
+
interface PromptingProgress extends BaseProgress {
|
|
25
|
+
readonly stage: "prompting";
|
|
26
|
+
}
|
|
27
|
+
/**
|
|
28
|
+
* Progress information for the context gathering stage.
|
|
29
|
+
*/
|
|
30
|
+
interface GatheringContextProgress extends BaseProgress {
|
|
31
|
+
readonly stage: "gatheringContext";
|
|
32
|
+
}
|
|
33
|
+
/**
|
|
34
|
+
* Progress information for the translating stage.
|
|
35
|
+
*/
|
|
36
|
+
interface TranslatingProgress extends BaseProgress {
|
|
37
|
+
readonly stage: "translating";
|
|
38
|
+
/**
|
|
39
|
+
* When chunking is used, indicates the current chunk index (0-based).
|
|
40
|
+
*/
|
|
41
|
+
readonly chunkIndex?: number;
|
|
42
|
+
/**
|
|
43
|
+
* When chunking is used, indicates total number of chunks.
|
|
44
|
+
*/
|
|
45
|
+
readonly totalChunks?: number;
|
|
46
|
+
}
|
|
47
|
+
/**
|
|
48
|
+
* Progress information for the refining stage.
|
|
49
|
+
*/
|
|
50
|
+
interface RefiningProgress extends BaseProgress {
|
|
51
|
+
readonly stage: "refining";
|
|
52
|
+
/**
|
|
53
|
+
* The current refinement iteration (1-based).
|
|
54
|
+
*/
|
|
55
|
+
readonly iteration?: number;
|
|
56
|
+
/**
|
|
57
|
+
* The maximum number of refinement iterations.
|
|
58
|
+
*/
|
|
59
|
+
readonly maxIterations?: number;
|
|
60
|
+
/**
|
|
61
|
+
* When refining chunks, indicates the current chunk index (0-based).
|
|
62
|
+
*/
|
|
63
|
+
readonly chunkIndex?: number;
|
|
64
|
+
/**
|
|
65
|
+
* When refining chunks, indicates total number of chunks.
|
|
66
|
+
*/
|
|
67
|
+
readonly totalChunks?: number;
|
|
68
|
+
}
|
|
69
|
+
/**
|
|
70
|
+
* Progress information for the best-of-N selection stage.
|
|
71
|
+
*/
|
|
72
|
+
interface SelectingProgress extends BaseProgress {
|
|
73
|
+
readonly stage: "selecting";
|
|
74
|
+
/**
|
|
75
|
+
* The current candidate being evaluated (0-based).
|
|
76
|
+
*/
|
|
77
|
+
readonly candidateIndex?: number;
|
|
78
|
+
/**
|
|
79
|
+
* The total number of candidates.
|
|
80
|
+
*/
|
|
81
|
+
readonly totalCandidates?: number;
|
|
82
|
+
}
|
|
83
|
+
/**
|
|
84
|
+
* Progress information for the translation process.
|
|
85
|
+
*/
|
|
86
|
+
type TranslationProgress = ChunkingProgress | PromptingProgress | GatheringContextProgress | TranslatingProgress | RefiningProgress | SelectingProgress;
|
|
87
|
+
/**
|
|
88
|
+
* The result of a translation operation.
|
|
89
|
+
*/
|
|
90
|
+
interface Translation {
|
|
91
|
+
/**
|
|
92
|
+
* The translated text.
|
|
93
|
+
*/
|
|
94
|
+
readonly text: string;
|
|
95
|
+
/**
|
|
96
|
+
* An optional title for the translated text, if provided.
|
|
97
|
+
*/
|
|
98
|
+
readonly title?: string;
|
|
99
|
+
/**
|
|
100
|
+
* The total number of tokens used during the translation process.
|
|
101
|
+
*/
|
|
102
|
+
readonly tokenUsed: number;
|
|
103
|
+
/**
|
|
104
|
+
* The time taken to process the translation, in milliseconds.
|
|
105
|
+
*/
|
|
106
|
+
readonly processingTime: number;
|
|
107
|
+
/**
|
|
108
|
+
* The final quality score after refinement (0-1).
|
|
109
|
+
* Only present when refinement is enabled.
|
|
110
|
+
*/
|
|
111
|
+
readonly qualityScore?: number;
|
|
112
|
+
/**
|
|
113
|
+
* The number of refinement iterations performed.
|
|
114
|
+
* Only present when refinement is enabled.
|
|
115
|
+
*/
|
|
116
|
+
readonly refinementIterations?: number;
|
|
117
|
+
/**
|
|
118
|
+
* The model that produced the best translation.
|
|
119
|
+
* Only present when best-of-N selection is used with multiple models.
|
|
120
|
+
*/
|
|
121
|
+
readonly selectedModel?: LanguageModel;
|
|
122
|
+
/**
|
|
123
|
+
* The accumulated glossary from dynamic term extraction.
|
|
124
|
+
* Only present when dynamic glossary is enabled.
|
|
125
|
+
*/
|
|
126
|
+
readonly accumulatedGlossary?: Glossary;
|
|
127
|
+
}
|
|
128
|
+
/**
|
|
129
|
+
* Options for iterative translation refinement.
|
|
130
|
+
*/
|
|
131
|
+
interface RefinementOptions {
|
|
132
|
+
/**
|
|
133
|
+
* Maximum number of refinement iterations.
|
|
134
|
+
*
|
|
135
|
+
* @default `3`
|
|
136
|
+
*/
|
|
137
|
+
readonly maxIterations?: number;
|
|
138
|
+
/**
|
|
139
|
+
* Quality threshold (0-1). If the evaluation score exceeds this threshold,
|
|
140
|
+
* refinement stops early.
|
|
141
|
+
*
|
|
142
|
+
* @default `0.9`
|
|
143
|
+
*/
|
|
144
|
+
readonly qualityThreshold?: number;
|
|
145
|
+
}
|
|
146
|
+
/**
|
|
147
|
+
* Options for best-of-N selection when multiple models are provided.
|
|
148
|
+
*/
|
|
149
|
+
interface BestOfNOptions {
|
|
150
|
+
/**
|
|
151
|
+
* The model to use for evaluating and selecting the best translation.
|
|
152
|
+
* If not specified, the first model in the array is used.
|
|
153
|
+
*/
|
|
154
|
+
readonly evaluatorModel?: LanguageModel;
|
|
155
|
+
}
|
|
156
|
+
/**
|
|
157
|
+
* Options for dynamic glossary accumulation during translation.
|
|
158
|
+
*/
|
|
159
|
+
interface DynamicGlossaryOptions {
|
|
160
|
+
/**
|
|
161
|
+
* Maximum number of terms to extract from each chunk.
|
|
162
|
+
*
|
|
163
|
+
* @default `10`
|
|
164
|
+
*/
|
|
165
|
+
readonly maxTermsPerChunk?: number;
|
|
166
|
+
/**
|
|
167
|
+
* The model to use for extracting terms.
|
|
168
|
+
* If not specified, the primary translation model is used.
|
|
169
|
+
*/
|
|
170
|
+
readonly extractorModel?: LanguageModel;
|
|
171
|
+
}
|
|
172
|
+
/**
|
|
173
|
+
* Options for the translate function.
|
|
174
|
+
*/
|
|
175
|
+
interface TranslateOptions {
|
|
176
|
+
/**
|
|
177
|
+
* The source language of the input text. If not provided, the language will
|
|
178
|
+
* be auto-detected.
|
|
179
|
+
*
|
|
180
|
+
* If a string is provided, it should be a valid BCP 47 language tag.
|
|
181
|
+
*/
|
|
182
|
+
readonly sourceLanguage?: Intl.Locale | string;
|
|
183
|
+
/**
|
|
184
|
+
* An optional title for the input text. It's also translated if provided.
|
|
185
|
+
*/
|
|
186
|
+
readonly title?: string;
|
|
187
|
+
/**
|
|
188
|
+
* Additional context or background information about the input text. This
|
|
189
|
+
* can help improve translation accuracy.
|
|
190
|
+
*/
|
|
191
|
+
readonly context?: string;
|
|
192
|
+
/**
|
|
193
|
+
* The desired tone for the translated text. This helps tailor the style
|
|
194
|
+
* and formality of the output.
|
|
195
|
+
*/
|
|
196
|
+
readonly tone?: TranslationTone;
|
|
197
|
+
/**
|
|
198
|
+
* The domain or context of the input text, e.g., `"medical"`, `"legal"`,
|
|
199
|
+
* `"technical"`, etc. This helps the model produce more accurate
|
|
200
|
+
* translations by tailoring the output to the specific field.
|
|
201
|
+
*/
|
|
202
|
+
readonly domain?: string;
|
|
203
|
+
/**
|
|
204
|
+
* The media type of the input text. This hints at the formatting and
|
|
205
|
+
* structure of the content so that the model can maintain it in
|
|
206
|
+
* the translation.
|
|
207
|
+
*
|
|
208
|
+
* @default `"text/plain"`
|
|
209
|
+
*/
|
|
210
|
+
readonly mediaType?: MediaType;
|
|
211
|
+
/**
|
|
212
|
+
* An optional callback function that is invoked to report progress
|
|
213
|
+
* during the translation process.
|
|
214
|
+
*
|
|
215
|
+
* @param progress The current progress information.
|
|
216
|
+
*/
|
|
217
|
+
readonly onProgress?: (progress: TranslationProgress) => void;
|
|
218
|
+
/**
|
|
219
|
+
* An optional `AbortSignal` to cancel the translation request.
|
|
220
|
+
*/
|
|
221
|
+
readonly signal?: AbortSignal;
|
|
222
|
+
/**
|
|
223
|
+
* Context sources to gather additional information for translation.
|
|
224
|
+
* These can be either required (always invoked) or passive (invoked by
|
|
225
|
+
* the LLM agent on demand).
|
|
226
|
+
*/
|
|
227
|
+
readonly contextSources?: readonly ContextSource[];
|
|
228
|
+
/**
|
|
229
|
+
* A glossary of terms for consistent translation. Terms in the glossary
|
|
230
|
+
* will be translated consistently throughout the document.
|
|
231
|
+
*/
|
|
232
|
+
readonly glossary?: Glossary;
|
|
233
|
+
/**
|
|
234
|
+
* Context window management strategy. This controls how the translation
|
|
235
|
+
* handles long documents that may exceed the model's context window.
|
|
236
|
+
*
|
|
237
|
+
* @default `{ type: "explicit", maxTokens: 8192 }` — NOTE(review): the shipped
* implementation (dist/index.js) falls back to 4096 tokens when
* `contextWindow.type` is not `"explicit"`; confirm which default is intended.
|
|
238
|
+
*/
|
|
239
|
+
readonly contextWindow?: ContextWindow;
|
|
240
|
+
/**
|
|
241
|
+
* Refinement settings for iterative translation improvement. When enabled,
|
|
242
|
+
* the translation will be evaluated and refined until the quality threshold
|
|
243
|
+
* is met or the maximum number of iterations is reached.
|
|
244
|
+
*
|
|
245
|
+
* - `true`: Enable refinement with default settings.
|
|
246
|
+
* - `RefinementOptions`: Enable with custom settings.
|
|
247
|
+
* - `undefined` or `false`: Disabled.
|
|
248
|
+
*/
|
|
249
|
+
readonly refinement?: boolean | RefinementOptions;
|
|
250
|
+
/**
|
|
251
|
+
* A custom chunker function for splitting long texts. If not provided,
|
|
252
|
+
* a default chunker is selected based on `mediaType`:
|
|
253
|
+
*
|
|
254
|
+
* - `"text/markdown"`: Markdown-aware chunker
|
|
255
|
+
* - `"text/plain"` or `"text/html"`: Paragraph-based chunker
|
|
256
|
+
*
|
|
257
|
+
* Set to `null` to disable chunking entirely.
|
|
258
|
+
*/
|
|
259
|
+
readonly chunker?: Chunker | null;
|
|
260
|
+
/**
|
|
261
|
+
* Best-of-N selection settings. When multiple models are provided and this
|
|
262
|
+
* is enabled, each model generates a translation and the best one is selected
|
|
263
|
+
* based on evaluation scores.
|
|
264
|
+
*
|
|
265
|
+
* - `true`: Enable best-of-N selection with default settings.
|
|
266
|
+
* - `BestOfNOptions`: Enable with custom settings.
|
|
267
|
+
* - `undefined` or `false`: Disabled (only first model is used).
|
|
268
|
+
*/
|
|
269
|
+
readonly bestOfN?: boolean | BestOfNOptions;
|
|
270
|
+
/**
|
|
271
|
+
* Dynamic glossary accumulation settings. When enabled, key terminology
|
|
272
|
+
* pairs are extracted from each translated chunk and accumulated for use
|
|
273
|
+
* in subsequent chunks, improving terminology consistency.
|
|
274
|
+
*
|
|
275
|
+
* - `true`: Enable dynamic glossary with default settings.
|
|
276
|
+
* - `DynamicGlossaryOptions`: Enable with custom settings.
|
|
277
|
+
* - `undefined` or `false`: Disabled.
|
|
278
|
+
*/
|
|
279
|
+
readonly dynamicGlossary?: boolean | DynamicGlossaryOptions;
|
|
280
|
+
}
|
|
281
|
+
//#endregion
|
|
282
|
+
export { BestOfNOptions, ChunkingProgress, DynamicGlossaryOptions, GatheringContextProgress, type MediaType$1 as MediaType, PromptingProgress, RefinementOptions, RefiningProgress, SelectingProgress, TranslateOptions, TranslatingProgress, Translation, TranslationProgress, type TranslationTone$1 as TranslationTone };
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
import { Chunker, ContextSource, ContextWindow, Glossary, MediaType, MediaType as MediaType$1, TranslationTone, TranslationTone as TranslationTone$1 } from "@vertana/core";
|
|
2
|
+
import { LanguageModel } from "ai";
|
|
3
|
+
|
|
4
|
+
//#region src/types.d.ts
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Base progress information shared by all stages.
|
|
8
|
+
*/
|
|
9
|
+
interface BaseProgress {
|
|
10
|
+
/**
|
|
11
|
+
* The progress percentage (0 to 1) of the current stage.
|
|
12
|
+
*/
|
|
13
|
+
readonly progress: number;
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Progress information for the chunking stage.
|
|
17
|
+
*/
|
|
18
|
+
interface ChunkingProgress extends BaseProgress {
|
|
19
|
+
readonly stage: "chunking";
|
|
20
|
+
}
|
|
21
|
+
/**
|
|
22
|
+
* Progress information for the prompting stage.
|
|
23
|
+
*/
|
|
24
|
+
interface PromptingProgress extends BaseProgress {
|
|
25
|
+
readonly stage: "prompting";
|
|
26
|
+
}
|
|
27
|
+
/**
|
|
28
|
+
* Progress information for the context gathering stage.
|
|
29
|
+
*/
|
|
30
|
+
interface GatheringContextProgress extends BaseProgress {
|
|
31
|
+
readonly stage: "gatheringContext";
|
|
32
|
+
}
|
|
33
|
+
/**
|
|
34
|
+
* Progress information for the translating stage.
|
|
35
|
+
*/
|
|
36
|
+
interface TranslatingProgress extends BaseProgress {
|
|
37
|
+
readonly stage: "translating";
|
|
38
|
+
/**
|
|
39
|
+
* When chunking is used, indicates the current chunk index (0-based).
|
|
40
|
+
*/
|
|
41
|
+
readonly chunkIndex?: number;
|
|
42
|
+
/**
|
|
43
|
+
* When chunking is used, indicates total number of chunks.
|
|
44
|
+
*/
|
|
45
|
+
readonly totalChunks?: number;
|
|
46
|
+
}
|
|
47
|
+
/**
|
|
48
|
+
* Progress information for the refining stage.
|
|
49
|
+
*/
|
|
50
|
+
interface RefiningProgress extends BaseProgress {
|
|
51
|
+
readonly stage: "refining";
|
|
52
|
+
/**
|
|
53
|
+
* The current refinement iteration (1-based).
|
|
54
|
+
*/
|
|
55
|
+
readonly iteration?: number;
|
|
56
|
+
/**
|
|
57
|
+
* The maximum number of refinement iterations.
|
|
58
|
+
*/
|
|
59
|
+
readonly maxIterations?: number;
|
|
60
|
+
/**
|
|
61
|
+
* When refining chunks, indicates the current chunk index (0-based).
|
|
62
|
+
*/
|
|
63
|
+
readonly chunkIndex?: number;
|
|
64
|
+
/**
|
|
65
|
+
* When refining chunks, indicates total number of chunks.
|
|
66
|
+
*/
|
|
67
|
+
readonly totalChunks?: number;
|
|
68
|
+
}
|
|
69
|
+
/**
|
|
70
|
+
* Progress information for the best-of-N selection stage.
|
|
71
|
+
*/
|
|
72
|
+
interface SelectingProgress extends BaseProgress {
|
|
73
|
+
readonly stage: "selecting";
|
|
74
|
+
/**
|
|
75
|
+
* The current candidate being evaluated (0-based).
|
|
76
|
+
*/
|
|
77
|
+
readonly candidateIndex?: number;
|
|
78
|
+
/**
|
|
79
|
+
* The total number of candidates.
|
|
80
|
+
*/
|
|
81
|
+
readonly totalCandidates?: number;
|
|
82
|
+
}
|
|
83
|
+
/**
|
|
84
|
+
* Progress information for the translation process.
|
|
85
|
+
*/
|
|
86
|
+
type TranslationProgress = ChunkingProgress | PromptingProgress | GatheringContextProgress | TranslatingProgress | RefiningProgress | SelectingProgress;
|
|
87
|
+
/**
|
|
88
|
+
* The result of a translation operation.
|
|
89
|
+
*/
|
|
90
|
+
interface Translation {
|
|
91
|
+
/**
|
|
92
|
+
* The translated text.
|
|
93
|
+
*/
|
|
94
|
+
readonly text: string;
|
|
95
|
+
/**
|
|
96
|
+
* An optional title for the translated text, if provided.
|
|
97
|
+
*/
|
|
98
|
+
readonly title?: string;
|
|
99
|
+
/**
|
|
100
|
+
* The total number of tokens used during the translation process.
|
|
101
|
+
*/
|
|
102
|
+
readonly tokenUsed: number;
|
|
103
|
+
/**
|
|
104
|
+
* The time taken to process the translation, in milliseconds.
|
|
105
|
+
*/
|
|
106
|
+
readonly processingTime: number;
|
|
107
|
+
/**
|
|
108
|
+
* The final quality score after refinement (0-1).
|
|
109
|
+
* Only present when refinement is enabled.
|
|
110
|
+
*/
|
|
111
|
+
readonly qualityScore?: number;
|
|
112
|
+
/**
|
|
113
|
+
* The number of refinement iterations performed.
|
|
114
|
+
* Only present when refinement is enabled.
|
|
115
|
+
*/
|
|
116
|
+
readonly refinementIterations?: number;
|
|
117
|
+
/**
|
|
118
|
+
* The model that produced the best translation.
|
|
119
|
+
* Only present when best-of-N selection is used with multiple models.
|
|
120
|
+
*/
|
|
121
|
+
readonly selectedModel?: LanguageModel;
|
|
122
|
+
/**
|
|
123
|
+
* The accumulated glossary from dynamic term extraction.
|
|
124
|
+
* Only present when dynamic glossary is enabled.
|
|
125
|
+
*/
|
|
126
|
+
readonly accumulatedGlossary?: Glossary;
|
|
127
|
+
}
|
|
128
|
+
/**
|
|
129
|
+
* Options for iterative translation refinement.
|
|
130
|
+
*/
|
|
131
|
+
interface RefinementOptions {
|
|
132
|
+
/**
|
|
133
|
+
* Maximum number of refinement iterations.
|
|
134
|
+
*
|
|
135
|
+
* @default `3`
|
|
136
|
+
*/
|
|
137
|
+
readonly maxIterations?: number;
|
|
138
|
+
/**
|
|
139
|
+
* Quality threshold (0-1). If the evaluation score exceeds this threshold,
|
|
140
|
+
* refinement stops early.
|
|
141
|
+
*
|
|
142
|
+
* @default `0.9`
|
|
143
|
+
*/
|
|
144
|
+
readonly qualityThreshold?: number;
|
|
145
|
+
}
|
|
146
|
+
/**
|
|
147
|
+
* Options for best-of-N selection when multiple models are provided.
|
|
148
|
+
*/
|
|
149
|
+
interface BestOfNOptions {
|
|
150
|
+
/**
|
|
151
|
+
* The model to use for evaluating and selecting the best translation.
|
|
152
|
+
* If not specified, the first model in the array is used.
|
|
153
|
+
*/
|
|
154
|
+
readonly evaluatorModel?: LanguageModel;
|
|
155
|
+
}
|
|
156
|
+
/**
|
|
157
|
+
* Options for dynamic glossary accumulation during translation.
|
|
158
|
+
*/
|
|
159
|
+
interface DynamicGlossaryOptions {
|
|
160
|
+
/**
|
|
161
|
+
* Maximum number of terms to extract from each chunk.
|
|
162
|
+
*
|
|
163
|
+
* @default `10`
|
|
164
|
+
*/
|
|
165
|
+
readonly maxTermsPerChunk?: number;
|
|
166
|
+
/**
|
|
167
|
+
* The model to use for extracting terms.
|
|
168
|
+
* If not specified, the primary translation model is used.
|
|
169
|
+
*/
|
|
170
|
+
readonly extractorModel?: LanguageModel;
|
|
171
|
+
}
|
|
172
|
+
/**
|
|
173
|
+
* Options for the translate function.
|
|
174
|
+
*/
|
|
175
|
+
interface TranslateOptions {
|
|
176
|
+
/**
|
|
177
|
+
* The source language of the input text. If not provided, the language will
|
|
178
|
+
* be auto-detected.
|
|
179
|
+
*
|
|
180
|
+
* If a string is provided, it should be a valid BCP 47 language tag.
|
|
181
|
+
*/
|
|
182
|
+
readonly sourceLanguage?: Intl.Locale | string;
|
|
183
|
+
/**
|
|
184
|
+
* An optional title for the input text. It's also translated if provided.
|
|
185
|
+
*/
|
|
186
|
+
readonly title?: string;
|
|
187
|
+
/**
|
|
188
|
+
* Additional context or background information about the input text. This
|
|
189
|
+
* can help improve translation accuracy.
|
|
190
|
+
*/
|
|
191
|
+
readonly context?: string;
|
|
192
|
+
/**
|
|
193
|
+
* The desired tone for the translated text. This helps tailor the style
|
|
194
|
+
* and formality of the output.
|
|
195
|
+
*/
|
|
196
|
+
readonly tone?: TranslationTone;
|
|
197
|
+
/**
|
|
198
|
+
* The domain or context of the input text, e.g., `"medical"`, `"legal"`,
|
|
199
|
+
* `"technical"`, etc. This helps the model produce more accurate
|
|
200
|
+
* translations by tailoring the output to the specific field.
|
|
201
|
+
*/
|
|
202
|
+
readonly domain?: string;
|
|
203
|
+
/**
|
|
204
|
+
* The media type of the input text. This hints at the formatting and
|
|
205
|
+
* structure of the content so that the model can maintain it in
|
|
206
|
+
* the translation.
|
|
207
|
+
*
|
|
208
|
+
* @default `"text/plain"`
|
|
209
|
+
*/
|
|
210
|
+
readonly mediaType?: MediaType;
|
|
211
|
+
/**
|
|
212
|
+
* An optional callback function that is invoked to report progress
|
|
213
|
+
* during the translation process.
|
|
214
|
+
*
|
|
215
|
+
* @param progress The current progress information.
|
|
216
|
+
*/
|
|
217
|
+
readonly onProgress?: (progress: TranslationProgress) => void;
|
|
218
|
+
/**
|
|
219
|
+
* An optional `AbortSignal` to cancel the translation request.
|
|
220
|
+
*/
|
|
221
|
+
readonly signal?: AbortSignal;
|
|
222
|
+
/**
|
|
223
|
+
* Context sources to gather additional information for translation.
|
|
224
|
+
* These can be either required (always invoked) or passive (invoked by
|
|
225
|
+
* the LLM agent on demand).
|
|
226
|
+
*/
|
|
227
|
+
readonly contextSources?: readonly ContextSource[];
|
|
228
|
+
/**
|
|
229
|
+
* A glossary of terms for consistent translation. Terms in the glossary
|
|
230
|
+
* will be translated consistently throughout the document.
|
|
231
|
+
*/
|
|
232
|
+
readonly glossary?: Glossary;
|
|
233
|
+
/**
|
|
234
|
+
* Context window management strategy. This controls how the translation
|
|
235
|
+
* handles long documents that may exceed the model's context window.
|
|
236
|
+
*
|
|
237
|
+
* @default `{ type: "explicit", maxTokens: 8192 }`
|
|
238
|
+
*/
|
|
239
|
+
readonly contextWindow?: ContextWindow;
|
|
240
|
+
/**
|
|
241
|
+
* Refinement settings for iterative translation improvement. When enabled,
|
|
242
|
+
* the translation will be evaluated and refined until the quality threshold
|
|
243
|
+
* is met or the maximum number of iterations is reached.
|
|
244
|
+
*
|
|
245
|
+
* - `true`: Enable refinement with default settings.
|
|
246
|
+
* - `RefinementOptions`: Enable with custom settings.
|
|
247
|
+
* - `undefined` or `false`: Disabled.
|
|
248
|
+
*/
|
|
249
|
+
readonly refinement?: boolean | RefinementOptions;
|
|
250
|
+
/**
|
|
251
|
+
* A custom chunker function for splitting long texts. If not provided,
|
|
252
|
+
* a default chunker is selected based on `mediaType`:
|
|
253
|
+
*
|
|
254
|
+
* - `"text/markdown"`: Markdown-aware chunker
|
|
255
|
+
* - `"text/plain"` or `"text/html"`: Paragraph-based chunker
|
|
256
|
+
*
|
|
257
|
+
* Set to `null` to disable chunking entirely.
|
|
258
|
+
*/
|
|
259
|
+
readonly chunker?: Chunker | null;
|
|
260
|
+
/**
|
|
261
|
+
* Best-of-N selection settings. When multiple models are provided and this
|
|
262
|
+
* is enabled, each model generates a translation and the best one is selected
|
|
263
|
+
* based on evaluation scores.
|
|
264
|
+
*
|
|
265
|
+
* - `true`: Enable best-of-N selection with default settings.
|
|
266
|
+
* - `BestOfNOptions`: Enable with custom settings.
|
|
267
|
+
* - `undefined` or `false`: Disabled (only first model is used).
|
|
268
|
+
*/
|
|
269
|
+
readonly bestOfN?: boolean | BestOfNOptions;
|
|
270
|
+
/**
|
|
271
|
+
* Dynamic glossary accumulation settings. When enabled, key terminology
|
|
272
|
+
* pairs are extracted from each translated chunk and accumulated for use
|
|
273
|
+
* in subsequent chunks, improving terminology consistency.
|
|
274
|
+
*
|
|
275
|
+
* - `true`: Enable dynamic glossary with default settings.
|
|
276
|
+
* - `DynamicGlossaryOptions`: Enable with custom settings.
|
|
277
|
+
* - `undefined` or `false`: Disabled.
|
|
278
|
+
*/
|
|
279
|
+
readonly dynamicGlossary?: boolean | DynamicGlossaryOptions;
|
|
280
|
+
}
|
|
281
|
+
//#endregion
|
|
282
|
+
export { BestOfNOptions, ChunkingProgress, DynamicGlossaryOptions, GatheringContextProgress, type MediaType$1 as MediaType, PromptingProgress, RefinementOptions, RefiningProgress, SelectingProgress, TranslateOptions, TranslatingProgress, Translation, TranslationProgress, type TranslationTone$1 as TranslationTone };
|
package/package.json
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@vertana/facade",
|
|
3
|
+
"version": "0.1.0-dev.1",
|
|
4
|
+
"description": "An LLM-powered natural language translation facade library",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"LLM",
|
|
7
|
+
"translation",
|
|
8
|
+
"natural language processing",
|
|
9
|
+
"NLP",
|
|
10
|
+
"language model"
|
|
11
|
+
],
|
|
12
|
+
"license": "MIT",
|
|
13
|
+
"author": {
|
|
14
|
+
"name": "Hong Minhee",
|
|
15
|
+
"email": "hong@minhee.org",
|
|
16
|
+
"url": "https://hongminhee.org/"
|
|
17
|
+
},
|
|
18
|
+
"homepage": "https://vertana.org/",
|
|
19
|
+
"repository": {
|
|
20
|
+
"type": "git",
|
|
21
|
+
"url": "git+https://github.com/dahlia/vertana.git",
|
|
22
|
+
"directory": "packages/facade"
|
|
23
|
+
},
|
|
24
|
+
"bugs": {
|
|
25
|
+
"url": "https://github.com/dahlia/vertana/issues"
|
|
26
|
+
},
|
|
27
|
+
"funding": [
|
|
28
|
+
"https://github.com/sponsors/dahlia"
|
|
29
|
+
],
|
|
30
|
+
"engines": {
|
|
31
|
+
"node": ">=20.0.0",
|
|
32
|
+
"bun": ">=1.2.0",
|
|
33
|
+
"deno": ">=2.3.0"
|
|
34
|
+
},
|
|
35
|
+
"files": [
|
|
36
|
+
"dist/",
|
|
37
|
+
"package.json",
|
|
38
|
+
"README.md"
|
|
39
|
+
],
|
|
40
|
+
"type": "module",
|
|
41
|
+
"module": "./dist/index.js",
|
|
42
|
+
"main": "./dist/index.cjs",
|
|
43
|
+
"types": "./dist/index.d.ts",
|
|
44
|
+
"exports": {
|
|
45
|
+
".": {
|
|
46
|
+
"types": {
|
|
47
|
+
"require": "./dist/index.d.cts",
|
|
48
|
+
"import": "./dist/index.d.ts"
|
|
49
|
+
},
|
|
50
|
+
"require": "./dist/index.cjs",
|
|
51
|
+
"import": "./dist/index.js"
|
|
52
|
+
}
|
|
53
|
+
},
|
|
54
|
+
"sideEffects": false,
|
|
55
|
+
"dependencies": {
|
|
56
|
+
"@logtape/logtape": "^1.3.5",
|
|
57
|
+
"@vertana/core": ""
|
|
58
|
+
},
|
|
59
|
+
"peerDependencies": {
|
|
60
|
+
"ai": "6.0.3"
|
|
61
|
+
},
|
|
62
|
+
"devDependencies": {
|
|
63
|
+
"@ai-sdk/anthropic": "3.0.1",
|
|
64
|
+
"@ai-sdk/google": "3.0.1",
|
|
65
|
+
"@ai-sdk/openai": "3.0.1",
|
|
66
|
+
"@types/node": "^20.19.9",
|
|
67
|
+
"@valibot/to-json-schema": "^1.5.0",
|
|
68
|
+
"ai": "6.0.3",
|
|
69
|
+
"tsdown": "^0.18.3",
|
|
70
|
+
"typescript": "^5.9.3",
|
|
71
|
+
"valibot": "1.2.0",
|
|
72
|
+
"zod": "4.2.1"
|
|
73
|
+
},
|
|
74
|
+
"scripts": {
|
|
75
|
+
"build": "tsdown",
|
|
76
|
+
"prepublish": "tsdown",
|
|
77
|
+
"test": "tsdown && node --experimental-transform-types --test --test-concurrency=4",
|
|
78
|
+
"test:bun": "tsdown && bun test",
|
|
79
|
+
"test:deno": "deno test --allow-env --allow-net",
|
|
80
|
+
"test-all": "tsdown && node --experimental-transform-types --test && bun test && deno test"
|
|
81
|
+
}
|
|
82
|
+
}
|