@llumiverse/core 0.15.0 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/lib/cjs/CompletionStream.js +31 -10
- package/lib/cjs/CompletionStream.js.map +1 -1
- package/lib/cjs/Driver.js +19 -21
- package/lib/cjs/Driver.js.map +1 -1
- package/lib/cjs/async.js +3 -2
- package/lib/cjs/async.js.map +1 -1
- package/lib/cjs/formatters/index.js +1 -3
- package/lib/cjs/formatters/index.js.map +1 -1
- package/lib/cjs/formatters/{claude.js → nova.js} +33 -37
- package/lib/cjs/formatters/nova.js.map +1 -0
- package/lib/cjs/formatters/openai.js +36 -6
- package/lib/cjs/formatters/openai.js.map +1 -1
- package/lib/cjs/index.js +1 -0
- package/lib/cjs/index.js.map +1 -1
- package/lib/cjs/options/bedrock.js +343 -0
- package/lib/cjs/options/bedrock.js.map +1 -0
- package/lib/cjs/options/groq.js +37 -0
- package/lib/cjs/options/groq.js.map +1 -0
- package/lib/cjs/options/openai.js +123 -0
- package/lib/cjs/options/openai.js.map +1 -0
- package/lib/cjs/options/vertexai.js +257 -0
- package/lib/cjs/options/vertexai.js.map +1 -0
- package/lib/cjs/options.js +54 -0
- package/lib/cjs/options.js.map +1 -0
- package/lib/cjs/types.js +34 -1
- package/lib/cjs/types.js.map +1 -1
- package/lib/esm/CompletionStream.js +31 -10
- package/lib/esm/CompletionStream.js.map +1 -1
- package/lib/esm/Driver.js +20 -22
- package/lib/esm/Driver.js.map +1 -1
- package/lib/esm/async.js +3 -2
- package/lib/esm/async.js.map +1 -1
- package/lib/esm/formatters/index.js +1 -3
- package/lib/esm/formatters/index.js.map +1 -1
- package/lib/esm/formatters/{claude.js → nova.js} +32 -36
- package/lib/esm/formatters/nova.js.map +1 -0
- package/lib/esm/formatters/openai.js +36 -6
- package/lib/esm/formatters/openai.js.map +1 -1
- package/lib/esm/index.js +1 -0
- package/lib/esm/index.js.map +1 -1
- package/lib/esm/options/bedrock.js +340 -0
- package/lib/esm/options/bedrock.js.map +1 -0
- package/lib/esm/options/groq.js +34 -0
- package/lib/esm/options/groq.js.map +1 -0
- package/lib/esm/options/openai.js +120 -0
- package/lib/esm/options/openai.js.map +1 -0
- package/lib/esm/options/vertexai.js +253 -0
- package/lib/esm/options/vertexai.js.map +1 -0
- package/lib/esm/options.js +50 -0
- package/lib/esm/options.js.map +1 -0
- package/lib/esm/types.js +33 -0
- package/lib/esm/types.js.map +1 -1
- package/lib/types/CompletionStream.d.ts +1 -1
- package/lib/types/CompletionStream.d.ts.map +1 -1
- package/lib/types/Driver.d.ts +5 -4
- package/lib/types/Driver.d.ts.map +1 -1
- package/lib/types/async.d.ts +3 -2
- package/lib/types/async.d.ts.map +1 -1
- package/lib/types/formatters/generic.d.ts.map +1 -1
- package/lib/types/formatters/index.d.ts +1 -3
- package/lib/types/formatters/index.d.ts.map +1 -1
- package/lib/types/formatters/nova.d.ts +40 -0
- package/lib/types/formatters/nova.d.ts.map +1 -0
- package/lib/types/formatters/openai.d.ts +13 -1
- package/lib/types/formatters/openai.d.ts.map +1 -1
- package/lib/types/index.d.ts +1 -0
- package/lib/types/index.d.ts.map +1 -1
- package/lib/types/options/bedrock.d.ts +32 -0
- package/lib/types/options/bedrock.d.ts.map +1 -0
- package/lib/types/options/groq.d.ts +12 -0
- package/lib/types/options/groq.d.ts.map +1 -0
- package/lib/types/options/openai.d.ts +21 -0
- package/lib/types/options/openai.d.ts.map +1 -0
- package/lib/types/options/vertexai.d.ts +52 -0
- package/lib/types/options/vertexai.d.ts.map +1 -0
- package/lib/types/options.d.ts +14 -0
- package/lib/types/options.d.ts.map +1 -0
- package/lib/types/types.d.ts +133 -49
- package/lib/types/types.d.ts.map +1 -1
- package/package.json +6 -6
- package/src/CompletionStream.ts +31 -11
- package/src/Driver.ts +29 -25
- package/src/async.ts +7 -5
- package/src/formatters/index.ts +1 -3
- package/src/formatters/nova.ts +141 -0
- package/src/formatters/openai.ts +52 -12
- package/src/index.ts +2 -1
- package/src/options/bedrock.ts +388 -0
- package/src/options/groq.ts +47 -0
- package/src/options/openai.ts +148 -0
- package/src/options/vertexai.ts +312 -0
- package/src/options.ts +62 -0
- package/src/types.ts +167 -52
- package/lib/cjs/formatters/claude.js.map +0 -1
- package/lib/cjs/formatters/llama2.js +0 -48
- package/lib/cjs/formatters/llama2.js.map +0 -1
- package/lib/cjs/formatters/llama3.js +0 -42
- package/lib/cjs/formatters/llama3.js.map +0 -1
- package/lib/esm/formatters/claude.js.map +0 -1
- package/lib/esm/formatters/llama2.js +0 -45
- package/lib/esm/formatters/llama2.js.map +0 -1
- package/lib/esm/formatters/llama3.js +0 -39
- package/lib/esm/formatters/llama3.js.map +0 -1
- package/lib/types/formatters/claude.d.ts +0 -25
- package/lib/types/formatters/claude.d.ts.map +0 -1
- package/lib/types/formatters/llama2.d.ts +0 -4
- package/lib/types/formatters/llama2.d.ts.map +0 -1
- package/lib/types/formatters/llama3.d.ts +0 -7
- package/lib/types/formatters/llama3.d.ts.map +0 -1
- package/src/formatters/claude.ts +0 -131
- package/src/formatters/llama2.ts +0 -58
- package/src/formatters/llama3.ts +0 -55
package/src/CompletionStream.ts
CHANGED
@@ -1,5 +1,5 @@
 import { AbstractDriver } from "./Driver.js";
-import { CompletionStream, DriverOptions, ExecutionOptions, ExecutionResponse } from "./types.js";
+import { CompletionStream, DriverOptions, ExecutionOptions, ExecutionResponse, ExecutionTokenUsage } from "./types.js";
 
 export class DefaultCompletionStream<PromptT = any> implements CompletionStream<PromptT> {
 
@@ -25,29 +25,49 @@ export class DefaultCompletionStream<PromptT = any> implements CompletionStream<
         );
 
         const start = Date.now();
-        const stream = await this.driver.
+        const stream = await this.driver.requestTextCompletionStream(this.prompt, this.options);
 
+        let finish_reason: string | undefined = undefined;
+        let promptTokens: number = 0;
+        let resultTokens: number | undefined = undefined;
         for await (const chunk of stream) {
             if (chunk) {
-
-
+                if (typeof chunk === 'string') {
+                    chunks.push(chunk);
+                    yield chunk;
+                }else{
+                    if (chunk.finish_reason) { //Do not replace non-null values with null values
+                        finish_reason = chunk.finish_reason; //Used to skip empty finish_reason chunks coming after "stop" or "length"
+                    }
+                    if (chunk.token_usage) {
+                        //Tokens returned include prior parts of stream,
+                        //so overwrite rather than accumulate
+                        //Math.max used as some models report final token count at beginning of stream
+                        promptTokens = Math.max(promptTokens,chunk.token_usage.prompt ?? 0);
+                        resultTokens = Math.max(resultTokens ?? 0,chunk.token_usage.result ?? 0);
+                    }
+                    if (chunk.result) {
+                        chunks.push(chunk.result);
+                        yield chunk.result;
+                    }
+                }
             }
         }
 
         const content = chunks.join('');
 
-
-
+        // Return undefined for the ExecutionTokenUsage object if there is nothing to fill it with.
+        // Allows for checking for truthyness on token_usage, rather than it's internals. For testing and downstream usage.
+        let tokens: ExecutionTokenUsage | undefined = resultTokens ?
+            { prompt: promptTokens, result: resultTokens, total: resultTokens + promptTokens, } : undefined
 
         this.completion = {
             result: content,
             prompt: this.prompt,
             execution_time: Date.now() - start,
-            token_usage:
-
-
-                total: resultTokens + promptTokens,
-            }
+            token_usage: tokens,
+            finish_reason: finish_reason,
+            chunks: chunks.length,
         }
 
         this.driver.validateResult(this.completion, this.options);
package/src/Driver.ts
CHANGED
@@ -5,10 +5,11 @@
  */
 
 import { DefaultCompletionStream, FallbackCompletionStream } from "./CompletionStream.js";
-import {
+import { formatTextPrompt } from "./formatters/index.js";
 import {
     AIModel,
     Completion,
+    CompletionChunk,
     CompletionStream,
     DataSource,
     DriverOptions,
@@ -16,7 +17,9 @@ import {
     EmbeddingsResult,
     ExecutionOptions,
     ExecutionResponse,
+    ImageGeneration,
     Logger,
+    Modalities,
     ModelSearchPayload,
     PromptOptions,
     PromptSegment,
@@ -51,14 +54,6 @@ export function createLogger(logger: Logger | "console" | undefined) {
     }
 }
 
-function applyExecutionDefaults(options: ExecutionOptions): ExecutionOptions {
-    return {
-        max_tokens: 2048,
-        temperature: 0.7,
-        ...options
-    }
-}
-
 export interface Driver<PromptT = unknown> {
 
     /**
@@ -69,7 +64,7 @@ export interface Driver<PromptT = unknown> {
      */
     createTrainingPrompt(options: TrainingPromptOptions): Promise<string>;
 
-    createPrompt(segments: PromptSegment[], opts:
+    createPrompt(segments: PromptSegment[], opts: ExecutionOptions): Promise<PromptT>;
 
     execute(segments: PromptSegment[], options: ExecutionOptions): Promise<ExecutionResponse<PromptT>>;
 
@@ -92,7 +87,7 @@ export interface Driver<PromptT = unknown> {
     //check that it is possible to connect to the environment
     validateConnection(): Promise<boolean>;
 
-    //generate embeddings for a given text
+    //generate embeddings for a given text or image
    generateEmbeddings(options: EmbeddingsOptions): Promise<EmbeddingsResult>;
 
 }
@@ -147,7 +142,6 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
     }
 
     async execute(segments: PromptSegment[], options: ExecutionOptions): Promise<ExecutionResponse<PromptT>> {
-        options = applyExecutionDefaults(options);
         const prompt = await this.createPrompt(segments, options);
         return this._execute(prompt, options);
     }
@@ -157,8 +151,20 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
             `[${this.provider}] Executing prompt on ${options.model}`);
         try {
             const start = Date.now();
-
-
+            let result;
+
+            switch (options.output_modality) {
+                case Modalities.text:
+                    result = await this.requestTextCompletion(prompt, options);
+                    this.validateResult(result, options);
+                    break;
+                case Modalities.image:
+                    result = await this.requestImageGeneration(prompt, options);
+                    break;
+                default:
+                    throw new Error(`Unsupported modality: ${options['output_modality'] ?? "No modality specified"}`);
+            }
+
             const execution_time = Date.now() - start;
             return { ...result, prompt, execution_time };
         } catch (error) {
@@ -169,10 +175,9 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
 
     // by default no stream is supported. we block and we return all at once
     async stream(segments: PromptSegment[], options: ExecutionOptions): Promise<CompletionStream<PromptT>> {
-        options = applyExecutionDefaults(options);
         const prompt = await this.createPrompt(segments, options);
         const canStream = await this.canStream(options);
-        if (canStream) {
+        if (options.output_modality === Modalities.text && canStream) {
             return new DefaultCompletionStream(this, prompt, options);
         } else {
             return new FallbackCompletionStream(this, prompt, options);
@@ -186,13 +191,7 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
      * @returns
      */
     protected async formatPrompt(segments: PromptSegment[], opts: PromptOptions): Promise<PromptT> {
-
-            return formatLlama2Prompt(segments, opts.result_schema) as PromptT;
-        } else if (/\bllama.?3\b/i.test(opts.model)) {
-            return formatLlama3Prompt(segments, opts.result_schema) as PromptT;
-        } else {
-            return formatTextPrompt(segments, opts.result_schema) as PromptT;
-        }
+        return formatTextPrompt(segments, opts.result_schema) as PromptT;
     }
 
     public async createPrompt(segments: PromptSegment[], opts: PromptOptions): Promise<PromptT> {
@@ -221,9 +220,14 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
         return [];
     }
 
-    abstract
+    abstract requestTextCompletion(prompt: PromptT, options: ExecutionOptions): Promise<Completion>;
 
-    abstract
+    abstract requestTextCompletionStream(prompt: PromptT, options: ExecutionOptions): Promise<AsyncIterable<CompletionChunk>>;
+
+    async requestImageGeneration(_prompt: PromptT, _options: ExecutionOptions): Promise<Completion<ImageGeneration>> {
+        throw new Error("Image generation not implemented.");
+        //Cannot be made abstract, as abstract methods are required in the derived class
+    }
 
     //list models available for this environement
     abstract listModels(params?: ModelSearchPayload): Promise<AIModel[]>;
package/src/async.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { ServerSentEvent } from "api-fetch-client";
+import { CompletionChunk } from "./types.js";
 
 export async function* asyncMap<T, R>(asyncIterable: AsyncIterable<T>, callback: (value: T, index: number) => R) {
     let i = 0;
@@ -15,22 +16,23 @@ export function oneAsyncIterator<T>(value: T): AsyncIterable<T> {
 }
 
 /**
- * Given a ReadableStream of server
+ * Given a ReadableStream of server sent events, tran
  */
-export function transformSSEStream(stream: ReadableStream<ServerSentEvent>, transform: (data: string) =>
+export function transformSSEStream(stream: ReadableStream<ServerSentEvent>, transform: (data: string) => CompletionChunk): ReadableStream<CompletionChunk> & AsyncIterable<CompletionChunk> {
     // on node and bun the readablestream is an async iterable
-    return stream.pipeThrough(new TransformStream<ServerSentEvent,
+    return stream.pipeThrough(new TransformStream<ServerSentEvent, CompletionChunk>({
         transform(event: ServerSentEvent, controller) {
             if (event.type === 'event' && event.data && event.data !== '[DONE]') {
                 try {
-
+                    const result = transform(event.data) ?? ''
+                    controller.enqueue(result);
                 } catch (err) {
                     // double check for the last event whicb is not a JSON - at this time togetherai and mistralai returrns the string [DONE]
                     // do nothing - happens if data is not a JSON - the last event data is the [DONE] string
                 }
             }
         }
-    })) as ReadableStream<
+    })) as ReadableStream<CompletionChunk> & AsyncIterable<CompletionChunk>;
 }
 
 export class EventStream<T, ReturnT = any> implements AsyncIterable<T>{
package/src/formatters/index.ts
CHANGED
@@ -3,10 +3,8 @@ import { PromptSegment } from "../types.js";
 
 export type PromptFormatter<T = any> = (messages: PromptSegment[], schema?: JSONSchema4) => T;
 
-export * from "./claude.js";
 export * from "./commons.js";
 export * from "./generic.js";
-export * from "./llama2.js";
-export * from "./llama3.js";
 export * from "./openai.js";
+export * from "./nova.js";
 
package/src/formatters/nova.ts
ADDED
@@ -0,0 +1,141 @@
+import { JSONSchema4 } from "json-schema";
+import { PromptRole, PromptSegment, readStreamAsBase64 } from "../index.js";
+//import { readStreamAsBase64 } from "../stream.js";
+import { getJSONSafetyNotice } from "./commons.js";
+
+export interface NovaMessage {
+    role: 'user' | 'assistant',
+    content: NovaMessagePart[]
+}
+
+export interface NovaSystemMessage {
+    text: string
+}
+
+interface NovaMessagePart {
+    text?: string // only set for text messages
+    image?: {
+        format: "jpeg" | "png" | "gif" | "webp",
+        source: {
+            bytes: string //"base64",
+        }
+    }
+    video?: {
+        format: "mkv" | "mov" | "mp4" | "webm" | "three_gp" | "flv" | "mpeg" | "mpg" | "wmv",
+        source: {
+            //Option 1: sending a s3 location
+            s3Location?: {
+                uri: string, // example: s3://my-bucket/object-key
+                bucketOwner: string // optional. example: "123456789012"
+            }
+            //Option 2: sending a base64 encoded video
+            bytes?: string //"base64",
+        }
+    }
+}
+
+export interface NovaMessagesPrompt {
+    system?: NovaSystemMessage[];
+    messages: NovaMessage[];
+    negative?: string;
+    mask?: string;
+}
+
+/**
+ * A formatter used by Bedrock to format prompts for nova related models
+ */
+
+export async function formatNovaPrompt(segments: PromptSegment[], schema?: JSONSchema4): Promise<NovaMessagesPrompt> {
+    const system: string[] = [];
+    const safety: string[] = [];
+    const messages: NovaMessage[] = [];
+    let negative: string = "";
+    let mask: string = "";
+
+    for (const segment of segments) {
+
+        const parts: NovaMessagePart[] = [];
+        if (segment.files) for (const f of segment.files) {
+            //TODO add video support
+            if (!f.mime_type?.startsWith('image')) {
+                continue;
+            }
+
+            const source = await f.getStream();
+            const data = await readStreamAsBase64(source);
+            const format = f.mime_type?.split('/')[1] || 'png';
+
+            parts.push({
+                image: {
+                    format: format as "jpeg" | "png" | "gif" | "webp",
+                    source: {
+                        bytes: data
+                    }
+                }
+            })
+        }
+
+        if (segment.content) {
+            parts.push({
+                text: segment.content
+            })
+        }
+
+        if (segment.role === PromptRole.system) {
+            system.push(segment.content);
+        } else if (segment.role === PromptRole.safety) {
+            safety.push(segment.content);
+        } else if (messages.length > 0 && messages[messages.length - 1].role === segment.role) {
+            //Maybe can remove for nova?
+            //concatenate messages of the same role (Claude requires alternative user and assistant roles)
+            messages[messages.length - 1].content.push(...parts);
+        } else if (segment.role === PromptRole.negative) {
+            negative = negative.concat(segment.content, ', ');
+        } else if (segment.role === PromptRole.mask) {
+            mask = mask.concat(segment.content, ' ');
+        } else if (segment.role !== PromptRole.tool) {
+            messages.push({
+                role: segment.role,
+                content: parts
+            });
+        }
+    }
+
+    if (schema) {
+        safety.push("IMPORTANT: " + getJSONSafetyNotice(schema));
+    }
+
+    // messages must contains at least 1 item. If the prompt doesn;t contains a user message (but only system messages)
+    // we need to put the system messages in the messages array
+
+    let systemMessage = system.join('\n').trim();
+    if (messages.length === 0) {
+        if (!systemMessage) {
+            throw new Error('Prompt must contain at least one message');
+        }
+        messages.push({ content: [{ text: systemMessage }], role: 'user' });
+        systemMessage = safety.join('\n');
+    } else if (safety.length > 0) {
+        systemMessage = systemMessage + '\n\nIMPORTANT: ' + safety.join('\n');
+    }
+
+    /*start Nova's message to amke sure it answers properly in JSON
+    if enabled, this requires to add the { to Nova's response*/
+
+    if (schema) {
+        messages.push({
+            role: "assistant",
+            content: [{
+                text: "{"
+            }]
+        });
+    }
+
+    // put system mesages first and safety last
+    return {
+        system: systemMessage ? [{ text: systemMessage }] : [{ text: "" }],
+        messages: messages,
+        negative: negative || undefined,
+        mask: mask || undefined,
+    }
+}
package/src/formatters/openai.ts
CHANGED
@@ -1,4 +1,4 @@
-import { PromptRole } from "../index.js";
+import { PromptRole, PromptOptions } from "../index.js";
 import { readStreamAsBase64 } from "../stream.js";
 import { PromptSegment } from "../types.js";
 
@@ -13,6 +13,12 @@ export interface OpenAIMessage {
     role: "system" | "user" | "assistant";
     name?: string;
 }
+export interface OpenAIToolMessage {
+    role: "tool";
+    tool_call_id: string;
+    content: string;
+}
+export type OpenAIInputMessage = OpenAIMessage | OpenAIToolMessage;
 
 export interface OpenAIContentPartText {
     type: "text";
@@ -42,7 +48,7 @@ export function formatOpenAILikeTextPrompt(segments: PromptSegment[]): OpenAITex
             system.push({ content: msg.content, role: "system" });
         } else if (msg.role === PromptRole.safety) {
             safety.push({ content: "IMPORTANT: " + msg.content, role: "system" });
-        } else {
+        } else if (msg.role !== PromptRole.negative && msg.role !== PromptRole.mask && msg.role !== PromptRole.tool) {
             user.push({
                 content: msg.content,
                 role: msg.role || 'user',
@@ -54,10 +60,11 @@ export function formatOpenAILikeTextPrompt(segments: PromptSegment[]): OpenAITex
     return system.concat(user).concat(safety);
 }
 
-
+
+export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[], opts: PromptOptions & OpenAIPromptFormatterOptions): Promise<OpenAIInputMessage[]> {
     const system: OpenAIMessage[] = [];
     const safety: OpenAIMessage[] = [];
-    const others:
+    const others: OpenAIInputMessage[] = [];
 
     for (const msg of segments) {
 
@@ -69,8 +76,11 @@ export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[]
                 const stream = await file.getStream();
                 const data = await readStreamAsBase64(stream);
                 parts.push({
-
-
+                    type: "image_url",
+                    image_url: {
+                        url: `data:${file.mime_type || "image/jpeg"};base64,${data}`,
+                        //detail: "auto" //This is modified just before execution to "low" | "high" | "auto"
+                    },
                 })
             }
         }
@@ -83,13 +93,21 @@ export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[]
         }
 
 
-
         if (msg.role === PromptRole.system) {
             system.push({
                 role: "system",
                 content: parts
             })
 
+
+            if (opts.useToolForFormatting && opts.schema) {
+                system.forEach(s => {
+                    s.content.forEach(c => {
+                        if (c.type === "text") c.text = "TOOL: " + c.text;
+                    })
+                })
+            }
+
         } else if (msg.role === PromptRole.safety) {
             const safetyMsg: OpenAIMessage = {
                 role: "system",
@@ -101,19 +119,41 @@ export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[]
             })
 
             system.push(safetyMsg)
-
-
+        } else if (msg.role === PromptRole.tool) {
+            if (!msg.tool_use_id) {
+                throw new Error("Tool use id is required for tool messages")
+            }
+            others.push({
+                role: "tool",
+                tool_call_id: msg.tool_use_id,
+                content: msg.content
+            })
+        } else if (msg.role !== PromptRole.negative && msg.role !== PromptRole.mask) {
             others.push({
                 role: msg.role ?? 'user',
                 content: parts
            })
         }
 
+    }
 
-
+    if (opts.result_schema && !opts.useToolForFormatting) {
+        system.push({
+            role: "system",
+            content: [{
+                type: "text",
+                text: "IMPORTANT: only answer using JSON, and respecting the schema included below, between the <response_schema> tags. " + `<response_schema>${JSON.stringify(opts.result_schema)}</response_schema>`
+            }]
+        })
     }
 
     // put system mesages first and safety last
-    return system.concat(others).concat(safety);
+    return ([] as OpenAIInputMessage[]).concat(system).concat(others).concat(safety);
+
+}
 
-
+export interface OpenAIPromptFormatterOptions {
+    multimodal?: boolean
+    useToolForFormatting?: boolean
+    schema?: Object
+}
package/src/index.ts
CHANGED