@llumiverse/core 0.21.0 → 0.22.0-dev.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/cjs/CompletionStream.js +124 -36
- package/lib/cjs/CompletionStream.js.map +1 -1
- package/lib/cjs/Driver.js +4 -1
- package/lib/cjs/Driver.js.map +1 -1
- package/lib/cjs/async.js.map +1 -1
- package/lib/cjs/stream.js +16 -10
- package/lib/cjs/stream.js.map +1 -1
- package/lib/cjs/validation.js +15 -7
- package/lib/cjs/validation.js.map +1 -1
- package/lib/esm/CompletionStream.js +124 -36
- package/lib/esm/CompletionStream.js.map +1 -1
- package/lib/esm/Driver.js +4 -1
- package/lib/esm/Driver.js.map +1 -1
- package/lib/esm/async.js.map +1 -1
- package/lib/esm/stream.js +16 -10
- package/lib/esm/stream.js.map +1 -1
- package/lib/esm/validation.js +15 -7
- package/lib/esm/validation.js.map +1 -1
- package/lib/tsconfig.tsbuildinfo +1 -1
- package/lib/types/CompletionStream.d.ts +2 -2
- package/lib/types/CompletionStream.d.ts.map +1 -1
- package/lib/types/Driver.d.ts +3 -3
- package/lib/types/Driver.d.ts.map +1 -1
- package/lib/types/async.d.ts +2 -2
- package/lib/types/async.d.ts.map +1 -1
- package/lib/types/stream.d.ts.map +1 -1
- package/lib/types/validation.d.ts +2 -2
- package/lib/types/validation.d.ts.map +1 -1
- package/package.json +8 -8
- package/src/CompletionStream.ts +121 -39
- package/src/Driver.ts +7 -5
- package/src/async.ts +4 -4
- package/src/stream.ts +19 -11
- package/src/validation.ts +15 -10
- package/lib/cjs/capability/bedrock.js +0 -186
- package/lib/cjs/capability/bedrock.js.map +0 -1
- package/lib/cjs/capability/openai.js +0 -122
- package/lib/cjs/capability/openai.js.map +0 -1
- package/lib/cjs/capability/vertexai.js +0 -90
- package/lib/cjs/capability/vertexai.js.map +0 -1
- package/lib/cjs/capability.js +0 -52
- package/lib/cjs/capability.js.map +0 -1
- package/lib/cjs/formatters/openai.js +0 -113
- package/lib/cjs/formatters/openai.js.map +0 -1
- package/lib/cjs/options/bedrock.js +0 -343
- package/lib/cjs/options/bedrock.js.map +0 -1
- package/lib/cjs/options/groq.js +0 -37
- package/lib/cjs/options/groq.js.map +0 -1
- package/lib/cjs/options/openai.js +0 -123
- package/lib/cjs/options/openai.js.map +0 -1
- package/lib/cjs/options/vertexai.js +0 -257
- package/lib/cjs/options/vertexai.js.map +0 -1
- package/lib/cjs/options.js +0 -19
- package/lib/cjs/options.js.map +0 -1
- package/lib/cjs/types.js +0 -80
- package/lib/cjs/types.js.map +0 -1
- package/lib/esm/capability/bedrock.js +0 -183
- package/lib/esm/capability/bedrock.js.map +0 -1
- package/lib/esm/capability/openai.js +0 -119
- package/lib/esm/capability/openai.js.map +0 -1
- package/lib/esm/capability/vertexai.js +0 -87
- package/lib/esm/capability/vertexai.js.map +0 -1
- package/lib/esm/capability.js +0 -47
- package/lib/esm/capability.js.map +0 -1
- package/lib/esm/formatters/openai.js +0 -109
- package/lib/esm/formatters/openai.js.map +0 -1
- package/lib/esm/options/bedrock.js +0 -340
- package/lib/esm/options/bedrock.js.map +0 -1
- package/lib/esm/options/groq.js +0 -34
- package/lib/esm/options/groq.js.map +0 -1
- package/lib/esm/options/openai.js +0 -120
- package/lib/esm/options/openai.js.map +0 -1
- package/lib/esm/options/vertexai.js +0 -253
- package/lib/esm/options/vertexai.js.map +0 -1
- package/lib/esm/options.js +0 -16
- package/lib/esm/options.js.map +0 -1
- package/lib/esm/types.js +0 -77
- package/lib/esm/types.js.map +0 -1
- package/lib/types/capability/bedrock.d.ts +0 -6
- package/lib/types/capability/bedrock.d.ts.map +0 -1
- package/lib/types/capability/openai.d.ts +0 -10
- package/lib/types/capability/openai.d.ts.map +0 -1
- package/lib/types/capability/vertexai.d.ts +0 -10
- package/lib/types/capability/vertexai.d.ts.map +0 -1
- package/lib/types/capability.d.ts +0 -4
- package/lib/types/capability.d.ts.map +0 -1
- package/lib/types/formatters/openai.d.ts +0 -41
- package/lib/types/formatters/openai.d.ts.map +0 -1
- package/lib/types/options/bedrock.d.ts +0 -31
- package/lib/types/options/bedrock.d.ts.map +0 -1
- package/lib/types/options/groq.d.ts +0 -11
- package/lib/types/options/groq.d.ts.map +0 -1
- package/lib/types/options/openai.d.ts +0 -20
- package/lib/types/options/openai.d.ts.map +0 -1
- package/lib/types/options/vertexai.d.ts +0 -51
- package/lib/types/options/vertexai.d.ts.map +0 -1
- package/lib/types/options.d.ts +0 -2
- package/lib/types/options.d.ts.map +0 -1
- package/lib/types/types.d.ts +0 -322
- package/lib/types/types.d.ts.map +0 -1
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import { ResultValidationError } from "@llumiverse/common";
|
|
1
|
+
import { CompletionResult, ResultValidationError } from "@llumiverse/common";
|
|
2
2
|
export declare class ValidationError extends Error implements ResultValidationError {
|
|
3
3
|
code: 'validation_error' | 'json_error';
|
|
4
4
|
constructor(code: 'validation_error' | 'json_error', message: string);
|
|
5
5
|
}
|
|
6
|
-
export declare function validateResult(data:
|
|
6
|
+
export declare function validateResult(data: CompletionResult[], schema: Object): CompletionResult[];
|
|
7
7
|
//# sourceMappingURL=validation.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"validation.d.ts","sourceRoot":"","sources":["../../src/validation.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,qBAAqB,EAAE,MAAM,oBAAoB,CAAC;
|
|
1
|
+
{"version":3,"file":"validation.d.ts","sourceRoot":"","sources":["../../src/validation.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,gBAAgB,EAA4B,qBAAqB,EAAE,MAAM,oBAAoB,CAAC;AAgBvG,qBAAa,eAAgB,SAAQ,KAAM,YAAW,qBAAqB;IAE5D,IAAI,EAAE,kBAAkB,GAAG,YAAY;gBAAvC,IAAI,EAAE,kBAAkB,GAAG,YAAY,EAC9C,OAAO,EAAE,MAAM;CAKtB;AAED,wBAAgB,cAAc,CAAC,IAAI,EAAE,gBAAgB,EAAE,EAAE,MAAM,EAAE,MAAM,GAAG,gBAAgB,EAAE,CAiD3F"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@llumiverse/core",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.22.0-dev.1",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"description": "Provide an universal API to LLMs. Support for existing LLMs can be added by writing a driver.",
|
|
6
6
|
"files": [
|
|
@@ -63,17 +63,17 @@
|
|
|
63
63
|
"url": "git+ssh://git@github.com/vertesia/llumiverse.git"
|
|
64
64
|
},
|
|
65
65
|
"devDependencies": {
|
|
66
|
-
"@vertesia/api-fetch-client": "^0.
|
|
67
|
-
"rimraf": "^6.0.
|
|
66
|
+
"@vertesia/api-fetch-client": "^0.78.0",
|
|
67
|
+
"rimraf": "^6.0.1",
|
|
68
68
|
"ts-dual-module": "^0.6.3",
|
|
69
|
-
"typescript": "^5.
|
|
70
|
-
"vitest": "^3.
|
|
69
|
+
"typescript": "^5.9.2",
|
|
70
|
+
"vitest": "^3.2.4"
|
|
71
71
|
},
|
|
72
72
|
"dependencies": {
|
|
73
|
-
"@types/node": "^22.
|
|
74
|
-
"ajv": "^8.
|
|
73
|
+
"@types/node": "^22.18.6",
|
|
74
|
+
"ajv": "^8.17.1",
|
|
75
75
|
"ajv-formats": "^3.0.1",
|
|
76
|
-
"@llumiverse/common": "0.
|
|
76
|
+
"@llumiverse/common": "0.22.0-dev.1"
|
|
77
77
|
},
|
|
78
78
|
"ts_dual_module": {
|
|
79
79
|
"outDir": "lib",
|
package/src/CompletionStream.ts
CHANGED
|
@@ -3,74 +3,138 @@ import { AbstractDriver } from "./Driver.js";
|
|
|
3
3
|
|
|
4
4
|
export class DefaultCompletionStream<PromptT = any> implements CompletionStream<PromptT> {
|
|
5
5
|
|
|
6
|
-
chunks:
|
|
6
|
+
chunks: number; // Counter for number of chunks instead of storing strings
|
|
7
7
|
completion: ExecutionResponse<PromptT> | undefined;
|
|
8
8
|
|
|
9
9
|
constructor(public driver: AbstractDriver<DriverOptions, PromptT>,
|
|
10
10
|
public prompt: PromptT,
|
|
11
11
|
public options: ExecutionOptions) {
|
|
12
|
-
this.chunks =
|
|
12
|
+
this.chunks = 0;
|
|
13
13
|
}
|
|
14
14
|
|
|
15
15
|
async *[Symbol.asyncIterator]() {
|
|
16
16
|
// reset state
|
|
17
17
|
this.completion = undefined;
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
}
|
|
21
|
-
const chunks = this.chunks;
|
|
18
|
+
this.chunks = 0;
|
|
19
|
+
const accumulatedResults: any[] = []; // Accumulate CompletionResult[] from chunks
|
|
22
20
|
|
|
23
21
|
this.driver.logger.debug(
|
|
24
22
|
`[${this.driver.provider}] Streaming Execution of ${this.options.model} with prompt`,
|
|
25
23
|
);
|
|
26
24
|
|
|
27
25
|
const start = Date.now();
|
|
28
|
-
const stream = await this.driver.requestTextCompletionStream(this.prompt, this.options);
|
|
29
|
-
|
|
30
26
|
let finish_reason: string | undefined = undefined;
|
|
31
27
|
let promptTokens: number = 0;
|
|
32
28
|
let resultTokens: number | undefined = undefined;
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
}
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
29
|
+
|
|
30
|
+
try {
|
|
31
|
+
const stream = await this.driver.requestTextCompletionStream(this.prompt, this.options);
|
|
32
|
+
for await (const chunk of stream) {
|
|
33
|
+
if (chunk) {
|
|
34
|
+
if (typeof chunk === 'string') {
|
|
35
|
+
this.chunks++;
|
|
36
|
+
yield chunk;
|
|
37
|
+
} else {
|
|
38
|
+
if (chunk.finish_reason) { //Do not replace non-null values with null values
|
|
39
|
+
finish_reason = chunk.finish_reason; //Used to skip empty finish_reason chunks coming after "stop" or "length"
|
|
40
|
+
}
|
|
41
|
+
if (chunk.token_usage) {
|
|
42
|
+
//Tokens returned include prior parts of stream,
|
|
43
|
+
//so overwrite rather than accumulate
|
|
44
|
+
//Math.max used as some models report final token count at beginning of stream
|
|
45
|
+
promptTokens = Math.max(promptTokens, chunk.token_usage.prompt ?? 0);
|
|
46
|
+
resultTokens = Math.max(resultTokens ?? 0, chunk.token_usage.result ?? 0);
|
|
47
|
+
}
|
|
48
|
+
if (Array.isArray(chunk.result) && chunk.result.length > 0) {
|
|
49
|
+
// Process each result in the chunk, combining consecutive text/JSON
|
|
50
|
+
for (const result of chunk.result) {
|
|
51
|
+
// Check if we can combine with the last accumulated result
|
|
52
|
+
const lastResult = accumulatedResults[accumulatedResults.length - 1];
|
|
53
|
+
|
|
54
|
+
if (lastResult &&
|
|
55
|
+
((lastResult.type === 'text' && result.type === 'text') ||
|
|
56
|
+
(lastResult.type === 'json' && result.type === 'json'))) {
|
|
57
|
+
// Combine consecutive text or JSON results
|
|
58
|
+
if (result.type === 'text') {
|
|
59
|
+
lastResult.value += result.value;
|
|
60
|
+
} else if (result.type === 'json') {
|
|
61
|
+
// For JSON, combine the parsed objects directly
|
|
62
|
+
try {
|
|
63
|
+
const lastParsed = lastResult.value;
|
|
64
|
+
const currentParsed = result.value;
|
|
65
|
+
if (lastParsed !== null && typeof lastParsed === 'object' &&
|
|
66
|
+
currentParsed !== null && typeof currentParsed === 'object') {
|
|
67
|
+
const combined = { ...lastParsed, ...currentParsed };
|
|
68
|
+
lastResult.value = combined;
|
|
69
|
+
} else {
|
|
70
|
+
// If not objects, convert to string and concatenate
|
|
71
|
+
const lastStr = typeof lastParsed === 'string' ? lastParsed : JSON.stringify(lastParsed);
|
|
72
|
+
const currentStr = typeof currentParsed === 'string' ? currentParsed : JSON.stringify(currentParsed);
|
|
73
|
+
lastResult.value = lastStr + currentStr;
|
|
74
|
+
}
|
|
75
|
+
} catch {
|
|
76
|
+
// If anything fails, just concatenate string representations
|
|
77
|
+
lastResult.value = String(lastResult.value) + String(result.value);
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
} else {
|
|
81
|
+
// Add as new result
|
|
82
|
+
accumulatedResults.push(result);
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// Convert CompletionResult[] to string for streaming
|
|
87
|
+
// Only yield if we have results to show
|
|
88
|
+
const resultText = chunk.result.map(r => {
|
|
89
|
+
switch (r.type) {
|
|
90
|
+
case 'text':
|
|
91
|
+
return r.value;
|
|
92
|
+
case 'json':
|
|
93
|
+
return JSON.stringify(r.value);
|
|
94
|
+
case 'image':
|
|
95
|
+
// Show truncated image placeholder for streaming
|
|
96
|
+
const truncatedValue = typeof r.value === 'string' ? r.value.slice(0, 10) : String(r.value).slice(0, 10);
|
|
97
|
+
return `\n[Image: ${truncatedValue}...]\n`;
|
|
98
|
+
default:
|
|
99
|
+
return String((r as any).value || '');
|
|
100
|
+
}
|
|
101
|
+
}).join('');
|
|
102
|
+
|
|
103
|
+
if (resultText) {
|
|
104
|
+
this.chunks++;
|
|
105
|
+
yield resultText;
|
|
106
|
+
}
|
|
107
|
+
}
|
|
52
108
|
}
|
|
53
109
|
}
|
|
54
110
|
}
|
|
111
|
+
} catch (error: any) {
|
|
112
|
+
error.prompt = this.prompt;
|
|
113
|
+
throw error;
|
|
55
114
|
}
|
|
56
115
|
|
|
57
|
-
const content = chunks.join('');
|
|
58
|
-
|
|
59
116
|
// Return undefined for the ExecutionTokenUsage object if there is nothing to fill it with.
|
|
60
|
-
// Allows for checking for
|
|
61
|
-
|
|
117
|
+
// Allows for checking for truthy-ness on token_usage, rather than it's internals. For testing and downstream usage.
|
|
118
|
+
const tokens: ExecutionTokenUsage | undefined = resultTokens ?
|
|
62
119
|
{ prompt: promptTokens, result: resultTokens, total: resultTokens + promptTokens, } : undefined
|
|
63
120
|
|
|
64
121
|
this.completion = {
|
|
65
|
-
result:
|
|
122
|
+
result: accumulatedResults, // Return the accumulated CompletionResult[] instead of text
|
|
66
123
|
prompt: this.prompt,
|
|
67
124
|
execution_time: Date.now() - start,
|
|
68
125
|
token_usage: tokens,
|
|
69
126
|
finish_reason: finish_reason,
|
|
70
|
-
chunks: chunks
|
|
127
|
+
chunks: this.chunks,
|
|
71
128
|
}
|
|
72
129
|
|
|
73
|
-
|
|
130
|
+
try {
|
|
131
|
+
if (this.completion) {
|
|
132
|
+
this.driver.validateResult(this.completion, this.options);
|
|
133
|
+
}
|
|
134
|
+
} catch (error: any) {
|
|
135
|
+
error.prompt = this.prompt;
|
|
136
|
+
throw error;
|
|
137
|
+
}
|
|
74
138
|
}
|
|
75
139
|
|
|
76
140
|
}
|
|
@@ -90,10 +154,28 @@ export class FallbackCompletionStream<PromptT = any> implements CompletionStream
|
|
|
90
154
|
this.driver.logger.debug(
|
|
91
155
|
`[${this.driver.provider}] Streaming is not supported, falling back to blocking execution`
|
|
92
156
|
);
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
157
|
+
try {
|
|
158
|
+
const completion = await this.driver._execute(this.prompt, this.options);
|
|
159
|
+
// For fallback streaming, yield the text content but keep the original completion
|
|
160
|
+
const content = completion.result.map(r => {
|
|
161
|
+
switch (r.type) {
|
|
162
|
+
case 'text':
|
|
163
|
+
return r.value;
|
|
164
|
+
case 'json':
|
|
165
|
+
return JSON.stringify(r.value);
|
|
166
|
+
case 'image':
|
|
167
|
+
// Show truncated image placeholder for streaming
|
|
168
|
+
const truncatedValue = typeof r.value === 'string' ? r.value.slice(0, 10) : String(r.value).slice(0, 10);
|
|
169
|
+
return `[Image: ${truncatedValue}...]`;
|
|
170
|
+
default:
|
|
171
|
+
return String((r as any).value || '');
|
|
172
|
+
}
|
|
173
|
+
}).join('');
|
|
174
|
+
yield content;
|
|
175
|
+
this.completion = completion; // Return the original completion with untouched CompletionResult[]
|
|
176
|
+
} catch (error: any) {
|
|
177
|
+
error.prompt = this.prompt;
|
|
178
|
+
throw error;
|
|
179
|
+
}
|
|
98
180
|
}
|
|
99
181
|
}
|
package/src/Driver.ts
CHANGED
|
@@ -9,7 +9,7 @@ import { formatTextPrompt } from "./formatters/index.js";
|
|
|
9
9
|
import {
|
|
10
10
|
AIModel,
|
|
11
11
|
Completion,
|
|
12
|
-
|
|
12
|
+
CompletionChunkObject,
|
|
13
13
|
CompletionStream,
|
|
14
14
|
DataSource,
|
|
15
15
|
DriverOptions,
|
|
@@ -17,7 +17,6 @@ import {
|
|
|
17
17
|
EmbeddingsResult,
|
|
18
18
|
ExecutionOptions,
|
|
19
19
|
ExecutionResponse,
|
|
20
|
-
ImageGeneration,
|
|
21
20
|
Logger,
|
|
22
21
|
Modalities,
|
|
23
22
|
ModelSearchPayload,
|
|
@@ -143,7 +142,10 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
|
|
|
143
142
|
|
|
144
143
|
async execute(segments: PromptSegment[], options: ExecutionOptions): Promise<ExecutionResponse<PromptT>> {
|
|
145
144
|
const prompt = await this.createPrompt(segments, options);
|
|
146
|
-
return this._execute(prompt, options)
|
|
145
|
+
return this._execute(prompt, options).catch((error: any) => {
|
|
146
|
+
(error as any).prompt = prompt;
|
|
147
|
+
throw error;
|
|
148
|
+
});
|
|
147
149
|
}
|
|
148
150
|
|
|
149
151
|
async _execute(prompt: PromptT, options: ExecutionOptions): Promise<ExecutionResponse<PromptT>> {
|
|
@@ -222,9 +224,9 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
|
|
|
222
224
|
|
|
223
225
|
abstract requestTextCompletion(prompt: PromptT, options: ExecutionOptions): Promise<Completion>;
|
|
224
226
|
|
|
225
|
-
abstract requestTextCompletionStream(prompt: PromptT, options: ExecutionOptions): Promise<AsyncIterable<
|
|
227
|
+
abstract requestTextCompletionStream(prompt: PromptT, options: ExecutionOptions): Promise<AsyncIterable<CompletionChunkObject>>;
|
|
226
228
|
|
|
227
|
-
async requestImageGeneration(_prompt: PromptT, _options: ExecutionOptions): Promise<Completion
|
|
229
|
+
async requestImageGeneration(_prompt: PromptT, _options: ExecutionOptions): Promise<Completion> {
|
|
228
230
|
throw new Error("Image generation not implemented.");
|
|
229
231
|
//Cannot be made abstract, as abstract methods are required in the derived class
|
|
230
232
|
}
|
package/src/async.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import type { ServerSentEvent } from "@vertesia/api-fetch-client"
|
|
2
|
-
import {
|
|
2
|
+
import { CompletionChunkObject } from "@llumiverse/common";
|
|
3
3
|
|
|
4
4
|
export async function* asyncMap<T, R>(asyncIterable: AsyncIterable<T>, callback: (value: T, index: number) => R) {
|
|
5
5
|
let i = 0;
|
|
@@ -18,9 +18,9 @@ export function oneAsyncIterator<T>(value: T): AsyncIterable<T> {
|
|
|
18
18
|
/**
|
|
19
19
|
* Given a ReadableStream of server sent events, tran
|
|
20
20
|
*/
|
|
21
|
-
export function transformSSEStream(stream: ReadableStream<ServerSentEvent>, transform: (data: string) =>
|
|
21
|
+
export function transformSSEStream(stream: ReadableStream<ServerSentEvent>, transform: (data: string) => CompletionChunkObject): ReadableStream<CompletionChunkObject> & AsyncIterable<CompletionChunkObject> {
|
|
22
22
|
// on node and bun the ReadableStream is an async iterable
|
|
23
|
-
return stream.pipeThrough(new TransformStream<ServerSentEvent,
|
|
23
|
+
return stream.pipeThrough(new TransformStream<ServerSentEvent, CompletionChunkObject>({
|
|
24
24
|
transform(event: ServerSentEvent, controller) {
|
|
25
25
|
if (event.type === 'event' && event.data && event.data !== '[DONE]') {
|
|
26
26
|
try {
|
|
@@ -32,7 +32,7 @@ export function transformSSEStream(stream: ReadableStream<ServerSentEvent>, tran
|
|
|
32
32
|
}
|
|
33
33
|
}
|
|
34
34
|
}
|
|
35
|
-
}))
|
|
35
|
+
})) satisfies ReadableStream<CompletionChunkObject> & AsyncIterable<CompletionChunkObject>;
|
|
36
36
|
}
|
|
37
37
|
|
|
38
38
|
export class EventStream<T, ReturnT = any> implements AsyncIterable<T> {
|
package/src/stream.ts
CHANGED
|
@@ -1,22 +1,30 @@
|
|
|
1
1
|
|
|
2
2
|
export async function readStreamAsBase64(stream: ReadableStream): Promise<string> {
|
|
3
|
-
|
|
3
|
+
const uint8Array = await readStreamAsUint8Array(stream);
|
|
4
|
+
return Buffer.from(uint8Array).toString('base64');
|
|
4
5
|
}
|
|
5
6
|
|
|
6
7
|
export async function readStreamAsString(stream: ReadableStream): Promise<string> {
|
|
7
|
-
|
|
8
|
+
const uint8Array = await readStreamAsUint8Array(stream);
|
|
9
|
+
return Buffer.from(uint8Array).toString();
|
|
8
10
|
}
|
|
9
11
|
|
|
10
12
|
export async function readStreamAsUint8Array(stream: ReadableStream): Promise<Uint8Array> {
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
}
|
|
15
|
-
|
|
16
|
-
async function _readStreamAsBuffer(stream: ReadableStream): Promise<Buffer> {
|
|
17
|
-
const out: Buffer[] = [];
|
|
13
|
+
const chunks: Uint8Array[] = [];
|
|
14
|
+
let totalLength = 0;
|
|
15
|
+
|
|
18
16
|
for await (const chunk of stream) {
|
|
19
|
-
|
|
17
|
+
const uint8Chunk = chunk instanceof Uint8Array ? chunk : new Uint8Array(chunk);
|
|
18
|
+
chunks.push(uint8Chunk);
|
|
19
|
+
totalLength += uint8Chunk.length;
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
const combined = new Uint8Array(totalLength);
|
|
23
|
+
let offset = 0;
|
|
24
|
+
for (const chunk of chunks) {
|
|
25
|
+
combined.set(chunk, offset);
|
|
26
|
+
offset += chunk.length;
|
|
20
27
|
}
|
|
21
|
-
|
|
28
|
+
|
|
29
|
+
return combined;
|
|
22
30
|
}
|
package/src/validation.ts
CHANGED
|
@@ -2,7 +2,7 @@ import { Ajv } from 'ajv';
|
|
|
2
2
|
import addFormats from 'ajv-formats';
|
|
3
3
|
import { extractAndParseJSON } from "./json.js";
|
|
4
4
|
import { resolveField } from './resolver.js';
|
|
5
|
-
import { ResultValidationError } from "@llumiverse/common";
|
|
5
|
+
import { CompletionResult, completionResultToString, ResultValidationError } from "@llumiverse/common";
|
|
6
6
|
|
|
7
7
|
|
|
8
8
|
const ajv = new Ajv({
|
|
@@ -28,17 +28,22 @@ export class ValidationError extends Error implements ResultValidationError {
|
|
|
28
28
|
}
|
|
29
29
|
}
|
|
30
30
|
|
|
31
|
-
export function validateResult(data:
|
|
31
|
+
export function validateResult(data: CompletionResult[], schema: Object): CompletionResult[] {
|
|
32
32
|
let json;
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
json =
|
|
37
|
-
}
|
|
38
|
-
|
|
33
|
+
if (Array.isArray(data)) {
|
|
34
|
+
const jsonResults = data.filter(r => r.type === "json");
|
|
35
|
+
if (jsonResults.length > 0) {
|
|
36
|
+
json = jsonResults[0].value;
|
|
37
|
+
} else {
|
|
38
|
+
const stringResult = data.map(completionResultToString).join("");
|
|
39
|
+
try {
|
|
40
|
+
json = extractAndParseJSON(stringResult);
|
|
41
|
+
} catch (error: any) {
|
|
42
|
+
throw new ValidationError("json_error", error.message)
|
|
43
|
+
}
|
|
39
44
|
}
|
|
40
45
|
} else {
|
|
41
|
-
|
|
46
|
+
throw new Error("Data to validate must be an array")
|
|
42
47
|
}
|
|
43
48
|
|
|
44
49
|
const validate = ajv.compile(schema);
|
|
@@ -71,5 +76,5 @@ export function validateResult(data: any, schema: Object) {
|
|
|
71
76
|
}
|
|
72
77
|
}
|
|
73
78
|
|
|
74
|
-
return json;
|
|
79
|
+
return [{ type: "json", value: json }];
|
|
75
80
|
}
|
|
@@ -1,186 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.getModelCapabilitiesBedrock = getModelCapabilitiesBedrock;
|
|
4
|
-
// Record of Bedrock model capabilities keyed by model ID.
|
|
5
|
-
const RECORD_MODEL_CAPABILITIES = {
|
|
6
|
-
"foundation-model/ai21.jamba-1-5-large-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
7
|
-
"foundation-model/ai21.jamba-1-5-mini-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
8
|
-
"foundation-model/ai21.jamba-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
9
|
-
"foundation-model/amazon.nova-canvas-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { image: true, text: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
10
|
-
"foundation-model/amazon.nova-lite-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
11
|
-
"foundation-model/amazon.nova-micro-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
12
|
-
"foundation-model/amazon.nova-pro-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
13
|
-
"foundation-model/amazon.titan-text-express-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
14
|
-
"foundation-model/amazon.titan-text-lite-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
15
|
-
"foundation-model/amazon.titan-text-premier-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
16
|
-
"foundation-model/amazon.titan-tg1-large": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
17
|
-
"foundation-model/anthropic.claude-3-5-haiku-20241022-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
18
|
-
"foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
19
|
-
"foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
20
|
-
"foundation-model/anthropic.claude-3-haiku-20240307-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
21
|
-
"foundation-model/anthropic.claude-3-opus-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
22
|
-
"foundation-model/anthropic.claude-3-sonnet-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
23
|
-
"foundation-model/anthropic.claude-instant-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
24
|
-
"foundation-model/anthropic.claude-v2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
25
|
-
"foundation-model/anthropic.claude-v2:1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
26
|
-
"foundation-model/cohere.command-light-text-v14": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
27
|
-
"foundation-model/cohere.command-r-plus-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
28
|
-
"foundation-model/cohere.command-r-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
29
|
-
"foundation-model/cohere.command-text-v14": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
30
|
-
"foundation-model/meta.llama3-1-405b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
31
|
-
"foundation-model/meta.llama3-1-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
32
|
-
"foundation-model/meta.llama3-1-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
33
|
-
"foundation-model/meta.llama3-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
34
|
-
"foundation-model/meta.llama3-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
35
|
-
"foundation-model/mistral.mixtral-8x7b-instruct-v0:1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
36
|
-
"foundation-model/mistral.mistral-7b-instruct-v0:2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
37
|
-
"foundation-model/mistral.mistral-large-2402-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
38
|
-
"foundation-model/mistral.mistral-large-2407-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
39
|
-
"foundation-model/mistral.mistral-small-2402-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
40
|
-
"inference-profile/us.amazon.nova-lite-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
41
|
-
"inference-profile/us.amazon.nova-micro-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
42
|
-
"inference-profile/us.amazon.nova-premier-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
43
|
-
"inference-profile/us.amazon.nova-pro-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
44
|
-
"inference-profile/us.anthropic.claude-3-5-haiku-20241022-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
45
|
-
"inference-profile/us.anthropic.claude-3-5-sonnet-20240620-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
46
|
-
"inference-profile/us.anthropic.claude-3-5-sonnet-20241022-v2:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
47
|
-
"inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
|
|
48
|
-
"inference-profile/us.anthropic.claude-3-haiku-20240307-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
49
|
-
"inference-profile/us.anthropic.claude-3-opus-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
50
|
-
"inference-profile/us.anthropic.claude-3-sonnet-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
51
|
-
"inference-profile/us.anthropic.claude-opus-4-20250514-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
52
|
-
"inference-profile/us.anthropic.claude-sonnet-4-20250514-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
53
|
-
"inference-profile/us.deepseek.r1-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
54
|
-
"inference-profile/us.meta.llama3-1-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
55
|
-
"inference-profile/us.meta.llama3-1-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
56
|
-
"inference-profile/us.meta.llama3-2-1b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
57
|
-
"inference-profile/us.meta.llama3-2-11b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
58
|
-
"inference-profile/us.meta.llama3-2-3b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
59
|
-
"inference-profile/us.meta.llama3-2-90b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
60
|
-
"inference-profile/us.meta.llama3-3-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
61
|
-
"inference-profile/us.meta.llama4-maverick-17b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
62
|
-
"inference-profile/us.meta.llama4-scout-17b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
|
|
63
|
-
"inference-profile/us.mistral.pixtral-large-2502-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
64
|
-
"inference-profile/us.writer.palmyra-x4-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
|
|
65
|
-
"inference-profile/us.writer.palmyra-x5-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false }
|
|
66
|
-
};
|
|
67
|
-
// Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
// Keys are family prefixes ("foundation-model/<provider>.<family>" or
// "inference-profile/us.<provider>.<family>"); getModelCapabilitiesBedrock
// falls back to the longest key that prefixes the normalized model id when
// no exact entry exists in RECORD_MODEL_CAPABILITIES. Because a family entry
// is the lowest common denominator, some flags are more restrictive than
// individual model entries (e.g. image input is false here for families
// whose newest members do accept images).
const RECORD_FAMILY_CAPABILITIES = {
    "foundation-model/ai21.jamba": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    // NOTE(review): output.text is false for the nova family — looks inconsistent
    // with every other entry; confirm against the upstream capability matrix.
    "foundation-model/amazon.nova": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: false, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    "foundation-model/amazon.titan": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "foundation-model/anthropic.claude": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "foundation-model/anthropic.claude-3": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    "foundation-model/anthropic.claude-3-5": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    "foundation-model/anthropic.claude-3-7": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    "foundation-model/cohere.command": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "foundation-model/meta.llama3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "foundation-model/mistral.mistral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "foundation-model/mistral.mistral-large": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "foundation-model/mistral.mixtral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    // Inference-profile families are keyed on the "us." region; other regions
    // are rewritten to "us." before lookup (see getModelCapabilitiesBedrock).
    "inference-profile/us.anthropic.claude-3-haiku": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "inference-profile/us.anthropic.claude-3-5-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
    "inference-profile/us.anthropic.claude-3-opus": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "inference-profile/us.anthropic.claude-3-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "inference-profile/us.anthropic.claude": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "inference-profile/us.deepseek.r1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "inference-profile/us.meta.llama3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "inference-profile/us.meta.llama4-maverick-17b": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "inference-profile/us.meta.llama4-scout-17b": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
    "inference-profile/us.mistral.pixtral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
    "inference-profile/us.writer.palmyra": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false }
};
|
|
93
|
-
/**
 * Extract the model identifier from an ARN or inference profile.
 * The result is always lowercased. For inference-profile ids the region and
 * provider segments are dropped ("us.provider.model" -> "model"), except for
 * deepseek, whose provider name is folded back in ("deepseek-<rest>").
 * @param modelName The full model ARN or name
 * @returns The normalized model identifier
 */
function normalizeModelName(modelName) {
    const lowered = modelName.toLowerCase();
    if (!lowered.includes("inference-profile")) {
        return lowered;
    }
    const segments = lowered.split("/");
    if (segments.length <= 1) {
        // "inference-profile" with no path component: nothing to strip.
        return lowered;
    }
    const providerModel = segments[segments.length - 1];
    const dotted = providerModel.split(".");
    // deepseek ids keep their provider: "us.deepseek.r1-v1:0" -> "deepseek-r1-v1:0"
    if (dotted.length > 1 && dotted[1] === "deepseek") {
        return `deepseek-${dotted.slice(2).join(".")}`;
    }
    // Drop "<region>.<provider>." when present; otherwise keep the whole tail.
    return dotted.length > 2 ? dotted.slice(2).join(".") : providerModel;
}
|
|
113
|
-
// Fallback pattern lists for inferring modalities and tool support
// when a model has no entry in RECORD_MODEL_CAPABILITIES and no family
// prefix in RECORD_FAMILY_CAPABILITIES. Each list holds substrings that
// modelMatches() checks against the normalized (lowercased) model id.
const IMAGE_INPUT_MODELS = ["image"]; // fallback: if model id contains 'image', supports image input
const VIDEO_INPUT_MODELS = ["video"];
const AUDIO_INPUT_MODELS = ["audio"];
const TEXT_INPUT_MODELS = ["text"];
const IMAGE_OUTPUT_MODELS = ["image"];
const VIDEO_OUTPUT_MODELS = ["video"];
const AUDIO_OUTPUT_MODELS = ["audio"];
const TEXT_OUTPUT_MODELS = ["text"];
const EMBEDDING_OUTPUT_MODELS = ["embed"];
// Model-name fragments that imply tool/function-calling support.
const TOOL_SUPPORT_MODELS = ["tool", "sonnet", "opus", "nova", "palmyra", "command-r", "mistral-large", "pixtral"];
|
|
124
|
-
/**
 * True when the model name contains any of the given substring patterns.
 * @param modelName Normalized (lowercased) model identifier
 * @param patterns List of substrings to look for
 * @returns true if at least one pattern occurs in modelName
 */
function modelMatches(modelName, patterns) {
    for (const pattern of patterns) {
        if (modelName.includes(pattern)) {
            return true;
        }
    }
    return false;
}
|
|
127
|
-
/**
 * Get the full ModelCapabilities for a Bedrock model.
 * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
 * Lookup order: exact record match, longest family-prefix match in
 * RECORD_FAMILY_CAPABILITIES, then substring-pattern inference.
 * @param model Model id, inference-profile id, or full Bedrock ARN
 */
function getModelCapabilitiesBedrock(model) {
    // Normalize ARN or inference-profile to model ID: keep everything from the
    // last 'foundation-model/' or 'inference-profile/' segment onward.
    const arnPattern = /^arn:aws:bedrock:[^:]+:[^:]*:(inference-profile|foundation-model)\/.+/i;
    let lookupKey = model;
    if (arnPattern.test(model)) {
        const foundationIdx = model.lastIndexOf('foundation-model/');
        const inferenceIdx = model.lastIndexOf('inference-profile/');
        if (foundationIdx !== -1) {
            lookupKey = model.substring(foundationIdx);
        } else if (inferenceIdx !== -1) {
            lookupKey = model.substring(inferenceIdx);
        }
    }
    // Map any regional inference-profile prefix onto the 'us.' keys the
    // capability records use, so all AWS regions resolve to the same entries.
    if (lookupKey.startsWith("inference-profile/")) {
        lookupKey = lookupKey.replace(/^inference-profile\/[^.]+\./, "inference-profile/us.");
    }
    // 1. Exact match in record
    const exact = RECORD_MODEL_CAPABILITIES[lookupKey];
    if (exact) {
        return exact;
    }
    // 2. Fallback: longest family prefix in RECORD_FAMILY_CAPABILITIES
    let familyKey;
    for (const candidate of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
        if (lookupKey.startsWith(candidate) && (familyKey === undefined || candidate.length > familyKey.length)) {
            familyKey = candidate;
        }
    }
    if (familyKey) {
        return RECORD_FAMILY_CAPABILITIES[familyKey];
    }
    // 3. Fallback: infer capabilities from substrings of the normalized name.
    // Flags come back as true or undefined (never false), matching the
    // original '|| undefined' convention; tool_support_streaming is omitted.
    const inferred = normalizeModelName(lookupKey);
    const flag = (patterns) => modelMatches(inferred, patterns) || undefined;
    const input = {
        text: flag(TEXT_INPUT_MODELS),
        image: flag(IMAGE_INPUT_MODELS),
        video: flag(VIDEO_INPUT_MODELS),
        audio: flag(AUDIO_INPUT_MODELS),
        embed: false
    };
    const output = {
        text: flag(TEXT_OUTPUT_MODELS),
        image: flag(IMAGE_OUTPUT_MODELS),
        video: flag(VIDEO_OUTPUT_MODELS),
        audio: flag(AUDIO_OUTPUT_MODELS),
        embed: flag(EMBEDDING_OUTPUT_MODELS)
    };
    const tool_support = flag(TOOL_SUPPORT_MODELS);
    return { input, output, tool_support };
}
|
|
186
|
-
//# sourceMappingURL=bedrock.js.map
|