modelfusion 0.41.2 → 0.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -2
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -4
- package/model-provider/anthropic/AnthropicApiConfiguration.cjs +23 -0
- package/model-provider/anthropic/AnthropicApiConfiguration.d.ts +11 -0
- package/model-provider/anthropic/AnthropicApiConfiguration.js +19 -0
- package/model-provider/anthropic/AnthropicError.cjs +39 -0
- package/model-provider/anthropic/AnthropicError.d.ts +37 -0
- package/model-provider/anthropic/AnthropicError.js +31 -0
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +66 -0
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +11 -0
- package/model-provider/anthropic/AnthropicPromptFormat.js +61 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +226 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +102 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +219 -0
- package/model-provider/anthropic/index.cjs +23 -0
- package/model-provider/anthropic/index.d.ts +4 -0
- package/model-provider/anthropic/index.js +4 -0
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
- package/model-provider/index.cjs +1 -0
- package/model-provider/index.d.ts +1 -0
- package/model-provider/index.js +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +0 -3
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +0 -1
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +0 -3
- package/package.json +1 -1
- package/prompt/PromptFormatTextGenerationModel.cjs +3 -3
- package/prompt/PromptFormatTextGenerationModel.d.ts +1 -1
- package/prompt/PromptFormatTextGenerationModel.js +3 -3
- package/util/getAudioFileExtension.cjs +29 -0
- package/util/getAudioFileExtension.d.ts +1 -0
- package/util/getAudioFileExtension.js +25 -0
- package/util/index.cjs +1 -0
- package/util/index.d.ts +1 -0
- package/util/index.js +1 -0
package/README.md
CHANGED
@@ -51,7 +51,7 @@ const text = await generateText(
 );
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface)
 
 #### streamText
 
@@ -68,7 +68,7 @@ for await (const textFragment of textStream) {
 }
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
 
 #### Prompt Format
 
@@ -104,6 +104,7 @@ const textStream = await streamText(
 | Prompt Format | Instruction Prompt | Chat Prompt |
 | ------------- | ------------------ | ----------- |
 | OpenAI Chat   | ✅                 | ✅          |
+| Anthropic     | ✅                 | ✅          |
 | Llama 2       | ✅                 | ✅          |
 | Alpaca        | ✅                 | ❌          |
 | Vicuna        | ❌                 | ✅          |
@@ -494,10 +495,18 @@ Integrations: [Helicone](https://modelfusion.dev/integration/observability/helic
 
 Examples for almost all of the individual functions and objects. Highly recommended to get started.
 
+### [StoryTeller](https://github.com/lgrammel/storyteller)
+
+> _multi-modal_, _structure streaming_, _image generation_, _text to speech_, _speech to text_, _text generation_, _structure generation_, _embeddings_
+
+StoryTeller is an exploratory web application that creates short audio stories for pre-school kids.
+
 ### [Chatbot (Terminal)](https://github.com/lgrammel/modelfusion/tree/main/examples/chatbot-terminal)
 
 > _Terminal app_, _chat_, _llama.cpp_
 
+A chat with an AI assistant, implemented as a terminal app.
+
 ### [Chatbot (Next.JS)](https://github.com/lgrammel/modelfusion/tree/main/examples/chatbot-next-js)
 
 > _Next.js app_, _OpenAI GPT-3.5-turbo_, _streaming_, _abort handling_
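For orientation (an illustrative sketch, not part of the diff): with this release, the README's `generateText` pattern also works against the new Anthropic provider. The import path assumes the package root re-exports the provider classes, and `ANTHROPIC_API_KEY` must be set:

```ts
import { generateText, AnthropicTextGenerationModel } from "modelfusion";

const text = await generateText(
  new AnthropicTextGenerationModel({ model: "claude-instant-1" }),
  // raw Anthropic prompts carry the Human/Assistant markers themselves:
  "\n\nHuman:Write a one-sentence greeting.\n\nAssistant:"
);
```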
package/model-function/generate-text/TextGenerationModel.d.ts
CHANGED
@@ -1,7 +1,5 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { PromptFormat } from "../../prompt/PromptFormat.js";
-import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
-import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
 import { Delta } from "../Delta.js";
 import { Model, ModelSettings } from "../Model.js";
 import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
@@ -38,9 +36,9 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
         totalTokens: number;
     };
     }>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
 }
 export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
     doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<Delta<string>>>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
 }
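The net effect of this change: the interfaces now promise only another `TextGenerationModel` / `TextStreamingModel` from `withPromptFormat`, instead of the concrete wrapper classes, which is what lets the two `PromptFormat*` imports above disappear (and with them a circular dependency). A rough type-level sketch, assuming these types are re-exported from the package root:

```ts
import type {
  TextStreamingModel,
  PromptFormat,
  InstructionPrompt,
} from "modelfusion";

declare const model: TextStreamingModel<string>;
declare const instructionFormat: PromptFormat<InstructionPrompt, string>;

// Since 0.42.0 the result is typed as the interface,
// not as PromptFormatTextStreamingModel:
const mapped: TextStreamingModel<InstructionPrompt> =
  model.withPromptFormat(instructionFormat);
```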
package/model-provider/anthropic/AnthropicApiConfiguration.cjs
ADDED
@@ -0,0 +1,23 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AnthropicApiConfiguration = void 0;
+const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
+const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
+class AnthropicApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
+    constructor({ baseUrl = "https://api.anthropic.com/v1", apiKey, retry, throttle, } = {}) {
+        super({
+            baseUrl,
+            headers: {
+                "x-api-key": (0, loadApiKey_js_1.loadApiKey)({
+                    apiKey,
+                    environmentVariableName: "ANTHROPIC_API_KEY",
+                    description: "Anthropic",
+                }),
+                "anthropic-version": "2023-06-01",
+            },
+            retry,
+            throttle,
+        });
+    }
+}
+exports.AnthropicApiConfiguration = AnthropicApiConfiguration;
package/model-provider/anthropic/AnthropicApiConfiguration.d.ts
ADDED
@@ -0,0 +1,11 @@
+import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+import { RetryFunction } from "../../core/api/RetryFunction.js";
+import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
+export declare class AnthropicApiConfiguration extends BaseUrlApiConfiguration {
+    constructor({ baseUrl, apiKey, retry, throttle, }?: {
+        baseUrl?: string;
+        apiKey?: string;
+        retry?: RetryFunction;
+        throttle?: ThrottleFunction;
+    });
+}
package/model-provider/anthropic/AnthropicApiConfiguration.js
ADDED
@@ -0,0 +1,19 @@
+import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+import { loadApiKey } from "../../core/api/loadApiKey.js";
+export class AnthropicApiConfiguration extends BaseUrlApiConfiguration {
+    constructor({ baseUrl = "https://api.anthropic.com/v1", apiKey, retry, throttle, } = {}) {
+        super({
+            baseUrl,
+            headers: {
+                "x-api-key": loadApiKey({
+                    apiKey,
+                    environmentVariableName: "ANTHROPIC_API_KEY",
+                    description: "Anthropic",
+                }),
+                "anthropic-version": "2023-06-01",
+            },
+            retry,
+            throttle,
+        });
+    }
+}
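All three artifacts above (.cjs, .d.ts, .js) compile from the same source. As the constructor defaults show, every field is optional; when `apiKey` is omitted, `loadApiKey` falls back to the `ANTHROPIC_API_KEY` environment variable. A usage sketch with hypothetical values:

```ts
import { AnthropicApiConfiguration } from "modelfusion";

const api = new AnthropicApiConfiguration({
  // hypothetical proxy instead of the default https://api.anthropic.com/v1:
  baseUrl: "https://my-proxy.example.com/anthropic/v1",
  apiKey: process.env.MY_ANTHROPIC_KEY, // otherwise ANTHROPIC_API_KEY is read
});
```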
package/model-provider/anthropic/AnthropicError.cjs
ADDED
@@ -0,0 +1,39 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.failedAnthropicCallResponseHandler = exports.AnthropicError = exports.anthropicErrorDataSchema = void 0;
+const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
+const zod_1 = require("zod");
+const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
+exports.anthropicErrorDataSchema = zod_1.z.object({
+    error: zod_1.z.object({
+        type: zod_1.z.string(),
+        message: zod_1.z.string(),
+    }),
+});
+class AnthropicError extends ApiCallError_js_1.ApiCallError {
+    constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
+        super({ message, statusCode, requestBodyValues, url });
+        Object.defineProperty(this, "data", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.data = data;
+    }
+}
+exports.AnthropicError = AnthropicError;
+const failedAnthropicCallResponseHandler = async ({ response, url, requestBodyValues }) => {
+    const responseBody = await response.text();
+    const parsedError = exports.anthropicErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
+    return new AnthropicError({
+        url,
+        requestBodyValues,
+        statusCode: response.status,
+        data: parsedError,
+    });
+};
+exports.failedAnthropicCallResponseHandler = failedAnthropicCallResponseHandler;
package/model-provider/anthropic/AnthropicError.d.ts
ADDED
@@ -0,0 +1,37 @@
+import { z } from "zod";
+import { ApiCallError } from "../../core/api/ApiCallError.js";
+import { ResponseHandler } from "../../core/api/postToApi.js";
+export declare const anthropicErrorDataSchema: z.ZodObject<{
+    error: z.ZodObject<{
+        type: z.ZodString;
+        message: z.ZodString;
+    }, "strip", z.ZodTypeAny, {
+        message: string;
+        type: string;
+    }, {
+        message: string;
+        type: string;
+    }>;
+}, "strip", z.ZodTypeAny, {
+    error: {
+        message: string;
+        type: string;
+    };
+}, {
+    error: {
+        message: string;
+        type: string;
+    };
+}>;
+export type AnthropicErrorData = z.infer<typeof anthropicErrorDataSchema>;
+export declare class AnthropicError extends ApiCallError {
+    readonly data: AnthropicErrorData;
+    constructor({ data, statusCode, url, requestBodyValues, message, }: {
+        message?: string;
+        statusCode: number;
+        url: string;
+        requestBodyValues: unknown;
+        data: AnthropicErrorData;
+    });
+}
+export declare const failedAnthropicCallResponseHandler: ResponseHandler<ApiCallError>;
package/model-provider/anthropic/AnthropicError.js
ADDED
@@ -0,0 +1,31 @@
+import SecureJSON from "secure-json-parse";
+import { z } from "zod";
+import { ApiCallError } from "../../core/api/ApiCallError.js";
+export const anthropicErrorDataSchema = z.object({
+    error: z.object({
+        type: z.string(),
+        message: z.string(),
+    }),
+});
+export class AnthropicError extends ApiCallError {
+    constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
+        super({ message, statusCode, requestBodyValues, url });
+        Object.defineProperty(this, "data", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.data = data;
+    }
+}
+export const failedAnthropicCallResponseHandler = async ({ response, url, requestBodyValues }) => {
+    const responseBody = await response.text();
+    const parsedError = anthropicErrorDataSchema.parse(SecureJSON.parse(responseBody));
+    return new AnthropicError({
+        url,
+        requestBodyValues,
+        statusCode: response.status,
+        data: parsedError,
+    });
+};
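Failed API calls are surfaced as `AnthropicError`, whose `data` field carries the `{ error: { type, message } }` payload validated by `anthropicErrorDataSchema`. A handling sketch (the surrounding call is elided):

```ts
import { AnthropicError } from "modelfusion";

try {
  // ... an Anthropic call made through the provider ...
} catch (error) {
  if (error instanceof AnthropicError) {
    console.error(error.data.error.type, error.data.error.message);
  }
}
```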
package/model-provider/anthropic/AnthropicPromptFormat.cjs
ADDED
@@ -0,0 +1,66 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
+const validateChatPrompt_js_1 = require("../../prompt/chat/validateChatPrompt.cjs");
+/**
+ * Formats an instruction prompt as an Anthropic prompt.
+ */
+function mapInstructionPromptToAnthropicFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            if (instruction.system != null) {
+                text += `${instruction.system}`;
+            }
+            text += "\n\nHuman:";
+            text += instruction.instruction;
+            if (instruction.input != null) {
+                // use tags per Anthropic instruction:
+                // https://docs.anthropic.com/claude/docs/constructing-a-prompt
+                text += `\n\n<data>${instruction.input}</data>`;
+            }
+            text += "\n\nAssistant:";
+            return text;
+        },
+        stopSequences: [],
+    };
+}
+exports.mapInstructionPromptToAnthropicFormat = mapInstructionPromptToAnthropicFormat;
+/**
+ * Formats a chat prompt as an Anthropic prompt.
+ */
+function mapChatPromptToAnthropicFormat() {
+    return {
+        format: (chatPrompt) => {
+            (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
+            let text = "";
+            for (let i = 0; i < chatPrompt.length; i++) {
+                const message = chatPrompt[i];
+                // system message:
+                if (i === 0 &&
+                    "system" in message &&
+                    typeof message.system === "string") {
+                    text += `${message.system}\n\n`;
+                    continue;
+                }
+                // user message
+                if ("user" in message) {
+                    text += `\n\nHuman:${message.user}`;
+                    continue;
+                }
+                // ai message:
+                if ("ai" in message) {
+                    text += `\n\nAssistant:${message.ai}`;
+                    continue;
+                }
+                // unsupported message:
+                throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
+            }
+            // AI message prefix:
+            text += `\n\nAssistant:`;
+            return text;
+        },
+        stopSequences: [],
+    };
+}
+exports.mapChatPromptToAnthropicFormat = mapChatPromptToAnthropicFormat;
package/model-provider/anthropic/AnthropicPromptFormat.d.ts
ADDED
@@ -0,0 +1,11 @@
+import { ChatPrompt } from "../../prompt/chat/ChatPrompt.js";
+import { InstructionPrompt } from "../../prompt/InstructionPrompt.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+/**
+ * Formats an instruction prompt as an Anthropic prompt.
+ */
+export declare function mapInstructionPromptToAnthropicFormat(): PromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt as an Anthropic prompt.
+ */
+export declare function mapChatPromptToAnthropicFormat(): PromptFormat<ChatPrompt, string>;
package/model-provider/anthropic/AnthropicPromptFormat.js
ADDED
@@ -0,0 +1,61 @@
+import { validateChatPrompt } from "../../prompt/chat/validateChatPrompt.js";
+/**
+ * Formats an instruction prompt as an Anthropic prompt.
+ */
+export function mapInstructionPromptToAnthropicFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            if (instruction.system != null) {
+                text += `${instruction.system}`;
+            }
+            text += "\n\nHuman:";
+            text += instruction.instruction;
+            if (instruction.input != null) {
+                // use tags per Anthropic instruction:
+                // https://docs.anthropic.com/claude/docs/constructing-a-prompt
+                text += `\n\n<data>${instruction.input}</data>`;
+            }
+            text += "\n\nAssistant:";
+            return text;
+        },
+        stopSequences: [],
+    };
+}
+/**
+ * Formats a chat prompt as an Anthropic prompt.
+ */
+export function mapChatPromptToAnthropicFormat() {
+    return {
+        format: (chatPrompt) => {
+            validateChatPrompt(chatPrompt);
+            let text = "";
+            for (let i = 0; i < chatPrompt.length; i++) {
+                const message = chatPrompt[i];
+                // system message:
+                if (i === 0 &&
+                    "system" in message &&
+                    typeof message.system === "string") {
+                    text += `${message.system}\n\n`;
+                    continue;
+                }
+                // user message
+                if ("user" in message) {
+                    text += `\n\nHuman:${message.user}`;
+                    continue;
+                }
+                // ai message:
+                if ("ai" in message) {
+                    text += `\n\nAssistant:${message.ai}`;
+                    continue;
+                }
+                // unsupported message:
+                throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
+            }
+            // AI message prefix:
+            text += `\n\nAssistant:`;
+            return text;
+        },
+        stopSequences: [],
+    };
+}
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs
ADDED
@@ -0,0 +1,226 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AnthropicTextGenerationResponseFormat = exports.AnthropicTextGenerationModel = exports.ANTHROPIC_TEXT_GENERATION_MODELS = void 0;
+const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
+const zod_1 = require("zod");
+const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
+const parseEventSourceStream_js_1 = require("../../event-source/parseEventSourceStream.cjs");
+const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
+const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
+const AnthropicError_js_1 = require("./AnthropicError.cjs");
+exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
+    "claude-instant-1": {
+        contextWindowSize: 100000,
+    },
+    "claude-instant-1.2": {
+        contextWindowSize: 100000,
+    },
+    "claude-2": {
+        contextWindowSize: 100000,
+    },
+    "claude-2.0": {
+        contextWindowSize: 100000,
+    },
+};
+/**
+ * Create a text generation model that calls the Anthropic API.
+ *
+ * @see https://docs.anthropic.com/claude/reference/complete_post
+ */
+class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "anthropic"
+        });
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "tokenizer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "countPromptTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        this.contextWindowSize =
+            exports.ANTHROPIC_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    async callAPI(prompt, options) {
+        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callAnthropicTextGenerationAPI({
+                ...this.settings,
+                stopSequences: this.settings.stopSequences,
+                maxTokens: this.settings.maxCompletionTokens,
+                abortSignal: options.run?.abortSignal,
+                responseFormat: options.responseFormat,
+                prompt,
+            }),
+        });
+    }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "temperature",
+            "topK",
+            "topP",
+            "userId",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
+    async doGenerateText(prompt, options) {
+        const response = await this.callAPI(prompt, {
+            ...options,
+            responseFormat: exports.AnthropicTextGenerationResponseFormat.json,
+        });
+        return {
+            response,
+            text: response.completion,
+        };
+    }
+    doStreamText(prompt, options) {
+        return this.callAPI(prompt, {
+            ...options,
+            responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
+        });
+    }
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptFormat.stopSequences,
+                ],
+            }),
+            promptFormat,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new AnthropicTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
+exports.AnthropicTextGenerationModel = AnthropicTextGenerationModel;
+const anthropicTextGenerationResponseSchema = zod_1.z.object({
+    completion: zod_1.z.string(),
+    stop_reason: zod_1.z.string(),
+    model: zod_1.z.string(),
+});
+async function callAnthropicTextGenerationAPI({ api = new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration(), abortSignal, responseFormat, model, prompt, maxTokens, stopSequences, temperature, topK, topP, userId, }) {
+    return (0, postToApi_js_1.postJsonToApi)({
+        url: api.assembleUrl(`/complete`),
+        headers: api.headers,
+        body: {
+            model,
+            prompt,
+            stream: responseFormat.stream,
+            max_tokens_to_sample: maxTokens,
+            temperature,
+            top_k: topK,
+            top_p: topP,
+            stop_sequences: stopSequences,
+            metadata: userId != null ? { user_id: userId } : undefined,
+        },
+        failedResponseHandler: AnthropicError_js_1.failedAnthropicCallResponseHandler,
+        successfulResponseHandler: responseFormat.handler,
+        abortSignal,
+    });
+}
+const anthropicTextStreamingResponseSchema = zod_1.z.object({
+    completion: zod_1.z.string(),
+    stop_reason: zod_1.z.string().nullable(),
+    model: zod_1.z.string(),
+});
+async function createAnthropicFullDeltaIterableQueue(stream) {
+    const queue = new AsyncQueue_js_1.AsyncQueue();
+    let content = "";
+    // process the stream asynchonously (no 'await' on purpose):
+    (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
+        .then(async (events) => {
+        try {
+            for await (const event of events) {
+                if (event.event === "error") {
+                    queue.push({ type: "error", error: event.data });
+                    queue.close();
+                    return;
+                }
+                if (event.event !== "completion") {
+                    continue;
+                }
+                const data = event.data;
+                const json = secure_json_parse_1.default.parse(data);
+                const parseResult = anthropicTextStreamingResponseSchema.safeParse(json);
+                if (!parseResult.success) {
+                    queue.push({
+                        type: "error",
+                        error: parseResult.error,
+                    });
+                    queue.close();
+                    return;
+                }
+                const eventData = parseResult.data;
+                content += eventData.completion;
+                queue.push({
+                    type: "delta",
+                    fullDelta: {
+                        content,
+                        isComplete: eventData.stop_reason != null,
+                        delta: eventData.completion,
+                    },
+                    valueDelta: eventData.completion,
+                });
+                if (eventData.stop_reason != null) {
+                    queue.close();
+                }
+            }
+        }
+        catch (error) {
+            queue.push({ type: "error", error });
+            queue.close();
+        }
+    })
+        .catch((error) => {
+        queue.push({ type: "error", error });
+        queue.close();
+    });
+    return queue;
+}
+exports.AnthropicTextGenerationResponseFormat = {
+    /**
+     * Returns the response as a JSON object.
+     */
+    json: {
+        stream: false,
+        handler: (0, postToApi_js_1.createJsonResponseHandler)(anthropicTextGenerationResponseSchema),
+    },
+    /**
+     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
+     * of the response stream.
+     */
+    deltaIterable: {
+        stream: true,
+        handler: async ({ response }) => createAnthropicFullDeltaIterableQueue(response.body),
+    },
+};
package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts
ADDED
@@ -0,0 +1,102 @@
+import { z } from "zod";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { ResponseHandler } from "../../core/api/postToApi.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { Delta } from "../../model-function/Delta.js";
+import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
+export declare const ANTHROPIC_TEXT_GENERATION_MODELS: {
+    "claude-instant-1": {
+        contextWindowSize: number;
+    };
+    "claude-instant-1.2": {
+        contextWindowSize: number;
+    };
+    "claude-2": {
+        contextWindowSize: number;
+    };
+    "claude-2.0": {
+        contextWindowSize: number;
+    };
+};
+export type AnthropicTextGenerationModelType = keyof typeof ANTHROPIC_TEXT_GENERATION_MODELS;
+export interface AnthropicTextGenerationModelSettings extends TextGenerationModelSettings {
+    api?: ApiConfiguration;
+    model: AnthropicTextGenerationModelType;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+    userId?: number;
+}
+/**
+ * Create a text generation model that calls the Anthropic API.
+ *
+ * @see https://docs.anthropic.com/claude/reference/complete_post
+ */
+export declare class AnthropicTextGenerationModel extends AbstractModel<AnthropicTextGenerationModelSettings> implements TextStreamingModel<string, AnthropicTextGenerationModelSettings> {
+    constructor(settings: AnthropicTextGenerationModelSettings);
+    readonly provider: "anthropic";
+    get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0";
+    readonly contextWindowSize: number;
+    readonly tokenizer: undefined;
+    readonly countPromptTokens: undefined;
+    callAPI<RESPONSE>(prompt: string, options: {
+        responseFormat: AnthropicTextGenerationResponseFormatType<RESPONSE>;
+    } & FunctionOptions): Promise<RESPONSE>;
+    get settingsForEvent(): Partial<AnthropicTextGenerationModelSettings>;
+    doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
+        response: {
+            model: string;
+            completion: string;
+            stop_reason: string;
+        };
+        text: string;
+    }>;
+    doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, AnthropicTextGenerationModelSettings, this>;
+    withSettings(additionalSettings: Partial<AnthropicTextGenerationModelSettings>): this;
+}
+declare const anthropicTextGenerationResponseSchema: z.ZodObject<{
+    completion: z.ZodString;
+    stop_reason: z.ZodString;
+    model: z.ZodString;
+}, "strip", z.ZodTypeAny, {
+    model: string;
+    completion: string;
+    stop_reason: string;
+}, {
+    model: string;
+    completion: string;
+    stop_reason: string;
+}>;
+export type AnthropicTextGenerationResponse = z.infer<typeof anthropicTextGenerationResponseSchema>;
+export type AnthropicTextGenerationResponseFormatType<T> = {
+    stream: boolean;
+    handler: ResponseHandler<T>;
+};
+export declare const AnthropicTextGenerationResponseFormat: {
+    /**
+     * Returns the response as a JSON object.
+     */
+    json: {
+        stream: false;
+        handler: ResponseHandler<{
+            model: string;
+            completion: string;
+            stop_reason: string;
+        }>;
+    };
+    /**
+     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
+     * of the response stream.
+     */
+    deltaIterable: {
+        stream: true;
+        handler: ({ response }: {
+            response: Response;
+        }) => Promise<AsyncIterable<Delta<string>>>;
+    };
+};
+export {};
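The declaration makes the dual response handling explicit: `callAPI` is generic over the response format, so the same request path yields either the buffered completion object or a delta stream. Illustrative only (model and prompt declared, not constructed):

```ts
import {
  AnthropicTextGenerationModel,
  AnthropicTextGenerationResponseFormat,
} from "modelfusion";

declare const model: AnthropicTextGenerationModel;
declare const prompt: string;

// buffered: validated against anthropicTextGenerationResponseSchema
const full = await model.callAPI(prompt, {
  responseFormat: AnthropicTextGenerationResponseFormat.json,
}); // { model, completion, stop_reason }

// streaming: server-sent events exposed as AsyncIterable<Delta<string>>
const deltas = await model.callAPI(prompt, {
  responseFormat: AnthropicTextGenerationResponseFormat.deltaIterable,
});
```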
package/model-provider/anthropic/AnthropicTextGenerationModel.js
ADDED
@@ -0,0 +1,219 @@
+import SecureJSON from "secure-json-parse";
+import { z } from "zod";
+import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { AsyncQueue } from "../../event-source/AsyncQueue.js";
+import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
+import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
+import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
+export const ANTHROPIC_TEXT_GENERATION_MODELS = {
+    "claude-instant-1": {
+        contextWindowSize: 100000,
+    },
+    "claude-instant-1.2": {
+        contextWindowSize: 100000,
+    },
+    "claude-2": {
+        contextWindowSize: 100000,
+    },
+    "claude-2.0": {
+        contextWindowSize: 100000,
+    },
+};
+/**
+ * Create a text generation model that calls the Anthropic API.
+ *
+ * @see https://docs.anthropic.com/claude/reference/complete_post
+ */
+export class AnthropicTextGenerationModel extends AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "anthropic"
+        });
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "tokenizer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "countPromptTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        this.contextWindowSize =
+            ANTHROPIC_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    async callAPI(prompt, options) {
+        return callWithRetryAndThrottle({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callAnthropicTextGenerationAPI({
+                ...this.settings,
+                stopSequences: this.settings.stopSequences,
+                maxTokens: this.settings.maxCompletionTokens,
+                abortSignal: options.run?.abortSignal,
+                responseFormat: options.responseFormat,
+                prompt,
+            }),
+        });
+    }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "temperature",
+            "topK",
+            "topP",
+            "userId",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
+    async doGenerateText(prompt, options) {
+        const response = await this.callAPI(prompt, {
+            ...options,
+            responseFormat: AnthropicTextGenerationResponseFormat.json,
+        });
+        return {
+            response,
+            text: response.completion,
+        };
+    }
+    doStreamText(prompt, options) {
+        return this.callAPI(prompt, {
+            ...options,
+            responseFormat: AnthropicTextGenerationResponseFormat.deltaIterable,
+        });
+    }
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextStreamingModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptFormat.stopSequences,
+                ],
+            }),
+            promptFormat,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new AnthropicTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
+const anthropicTextGenerationResponseSchema = z.object({
+    completion: z.string(),
+    stop_reason: z.string(),
+    model: z.string(),
+});
+async function callAnthropicTextGenerationAPI({ api = new AnthropicApiConfiguration(), abortSignal, responseFormat, model, prompt, maxTokens, stopSequences, temperature, topK, topP, userId, }) {
+    return postJsonToApi({
+        url: api.assembleUrl(`/complete`),
+        headers: api.headers,
+        body: {
+            model,
+            prompt,
+            stream: responseFormat.stream,
+            max_tokens_to_sample: maxTokens,
+            temperature,
+            top_k: topK,
+            top_p: topP,
+            stop_sequences: stopSequences,
+            metadata: userId != null ? { user_id: userId } : undefined,
+        },
+        failedResponseHandler: failedAnthropicCallResponseHandler,
+        successfulResponseHandler: responseFormat.handler,
+        abortSignal,
+    });
+}
+const anthropicTextStreamingResponseSchema = z.object({
+    completion: z.string(),
+    stop_reason: z.string().nullable(),
+    model: z.string(),
+});
+async function createAnthropicFullDeltaIterableQueue(stream) {
+    const queue = new AsyncQueue();
+    let content = "";
+    // process the stream asynchonously (no 'await' on purpose):
+    parseEventSourceStream({ stream })
+        .then(async (events) => {
+        try {
+            for await (const event of events) {
+                if (event.event === "error") {
+                    queue.push({ type: "error", error: event.data });
+                    queue.close();
+                    return;
+                }
+                if (event.event !== "completion") {
+                    continue;
+                }
+                const data = event.data;
+                const json = SecureJSON.parse(data);
+                const parseResult = anthropicTextStreamingResponseSchema.safeParse(json);
+                if (!parseResult.success) {
+                    queue.push({
+                        type: "error",
+                        error: parseResult.error,
+                    });
+                    queue.close();
+                    return;
+                }
+                const eventData = parseResult.data;
+                content += eventData.completion;
+                queue.push({
+                    type: "delta",
+                    fullDelta: {
+                        content,
+                        isComplete: eventData.stop_reason != null,
+                        delta: eventData.completion,
+                    },
+                    valueDelta: eventData.completion,
+                });
+                if (eventData.stop_reason != null) {
+                    queue.close();
+                }
+            }
+        }
+        catch (error) {
+            queue.push({ type: "error", error });
+            queue.close();
+        }
+    })
+        .catch((error) => {
+        queue.push({ type: "error", error });
+        queue.close();
+    });
+    return queue;
+}
+export const AnthropicTextGenerationResponseFormat = {
+    /**
+     * Returns the response as a JSON object.
+     */
+    json: {
+        stream: false,
+        handler: createJsonResponseHandler(anthropicTextGenerationResponseSchema),
+    },
+    /**
+     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
+     * of the response stream.
+     */
+    deltaIterable: {
+        stream: true,
+        handler: async ({ response }) => createAnthropicFullDeltaIterableQueue(response.body),
+    },
+};
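Putting the provider together end to end, a sketch that mirrors the README's `streamText` pattern (assuming root re-exports and `ANTHROPIC_API_KEY`):

```ts
import {
  streamText,
  AnthropicTextGenerationModel,
  mapInstructionPromptToAnthropicFormat,
} from "modelfusion";

const textStream = await streamText(
  new AnthropicTextGenerationModel({
    model: "claude-instant-1",
    temperature: 0.7,
    maxCompletionTokens: 500,
  }).withPromptFormat(mapInstructionPromptToAnthropicFormat()),
  { instruction: "Write a short story about a robot learning to love." }
);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}
```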
package/model-provider/anthropic/index.cjs
ADDED
@@ -0,0 +1,23 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
+__exportStar(require("./AnthropicApiConfiguration.cjs"), exports);
+var AnthropicError_js_1 = require("./AnthropicError.cjs");
+Object.defineProperty(exports, "AnthropicError", { enumerable: true, get: function () { return AnthropicError_js_1.AnthropicError; } });
+Object.defineProperty(exports, "anthropicErrorDataSchema", { enumerable: true, get: function () { return AnthropicError_js_1.anthropicErrorDataSchema; } });
+__exportStar(require("./AnthropicPromptFormat.cjs"), exports);
+__exportStar(require("./AnthropicTextGenerationModel.cjs"), exports);
package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts
CHANGED
@@ -44,9 +44,9 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     get modelName(): string;
     readonly contextWindowSize: undefined;
     readonly tokenizer: undefined;
+    readonly countPromptTokens: undefined;
     callAPI(prompt: string, options?: FunctionOptions): Promise<HuggingFaceTextGenerationResponse>;
     get settingsForEvent(): Partial<HuggingFaceTextGenerationModelSettings>;
-    readonly countPromptTokens: undefined;
     doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
         response: {
             generated_text: string;
package/model-provider/index.cjs
CHANGED
@@ -14,6 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./anthropic/index.cjs"), exports);
 __exportStar(require("./automatic1111/index.cjs"), exports);
 __exportStar(require("./cohere/index.cjs"), exports);
 __exportStar(require("./elevenlabs/index.cjs"), exports);
package/model-provider/index.js
CHANGED
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs
CHANGED
@@ -102,9 +102,6 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
         });
     }
-    extractTextDelta(fullDelta) {
-        return fullDelta.delta;
-    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
             model: this.withSettings({
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts
CHANGED
@@ -100,7 +100,6 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
         };
     }>;
     doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
-    extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
     withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
CHANGED
@@ -96,9 +96,6 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
         });
     }
-    extractTextDelta(fullDelta) {
-        return fullDelta.delta;
-    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
             model: this.withSettings({
package/package.json
CHANGED
package/prompt/PromptFormatTextGenerationModel.cjs
CHANGED
@@ -41,6 +41,9 @@ class PromptFormatTextGenerationModel {
         const mappedPrompt = this.promptFormat.format(prompt);
         return this.model.doGenerateText(mappedPrompt, options);
     }
+    get settingsForEvent() {
+        return this.model.settingsForEvent;
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextGenerationModel({
             model: this.withSettings({
@@ -52,9 +55,6 @@ class PromptFormatTextGenerationModel {
             promptFormat,
         });
     }
-    get settingsForEvent() {
-        return this.model.settingsForEvent;
-    }
     withSettings(additionalSettings) {
         return new PromptFormatTextGenerationModel({
             model: this.model.withSettings(additionalSettings),
package/prompt/PromptFormatTextGenerationModel.d.ts
CHANGED
@@ -22,7 +22,7 @@ export declare class PromptFormatTextGenerationModel<PROMPT, MODEL_PROMPT, SETTI
         totalTokens: number;
     } | undefined;
     }>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
     get settingsForEvent(): Partial<SETTINGS>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
     withSettings(additionalSettings: Partial<SETTINGS>): this;
 }
|
@@ -38,6 +38,9 @@ export class PromptFormatTextGenerationModel {
|
|
38
38
|
const mappedPrompt = this.promptFormat.format(prompt);
|
39
39
|
return this.model.doGenerateText(mappedPrompt, options);
|
40
40
|
}
|
41
|
+
get settingsForEvent() {
|
42
|
+
return this.model.settingsForEvent;
|
43
|
+
}
|
41
44
|
withPromptFormat(promptFormat) {
|
42
45
|
return new PromptFormatTextGenerationModel({
|
43
46
|
model: this.withSettings({
|
@@ -49,9 +52,6 @@ export class PromptFormatTextGenerationModel {
|
|
49
52
|
promptFormat,
|
50
53
|
});
|
51
54
|
}
|
52
|
-
get settingsForEvent() {
|
53
|
-
return this.model.settingsForEvent;
|
54
|
-
}
|
55
55
|
withSettings(additionalSettings) {
|
56
56
|
return new PromptFormatTextGenerationModel({
|
57
57
|
model: this.model.withSettings(additionalSettings),
|
package/util/getAudioFileExtension.cjs
ADDED
@@ -0,0 +1,29 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getAudioFileExtension = void 0;
+function getAudioFileExtension(mimeType) {
+    const normalizedMimeType = mimeType.split(";")[0].toLowerCase();
+    switch (normalizedMimeType) {
+        case "audio/webm":
+            return "webm";
+        case "audio/mp3":
+            return "mp3";
+        case "audio/wav":
+            return "wav";
+        case "audio/mp4":
+            return "mp4";
+        case "audio/mpeg":
+        case "audio/mpga":
+            return "mpeg";
+        case "audio/ogg":
+        case "audio/oga":
+            return "ogg";
+        case "audio/flac":
+            return "flac";
+        case "audio/m4a":
+            return "m4a";
+        default:
+            throw new Error(`Unsupported audio format: ${mimeType}`);
+    }
+}
+exports.getAudioFileExtension = getAudioFileExtension;
package/util/getAudioFileExtension.d.ts
ADDED
@@ -0,0 +1 @@
+export declare function getAudioFileExtension(mimeType: string): "mp3" | "flac" | "m4a" | "mp4" | "mpeg" | "ogg" | "wav" | "webm";
package/util/getAudioFileExtension.js
ADDED
@@ -0,0 +1,25 @@
+export function getAudioFileExtension(mimeType) {
+    const normalizedMimeType = mimeType.split(";")[0].toLowerCase();
+    switch (normalizedMimeType) {
+        case "audio/webm":
+            return "webm";
+        case "audio/mp3":
+            return "mp3";
+        case "audio/wav":
+            return "wav";
+        case "audio/mp4":
+            return "mp4";
+        case "audio/mpeg":
+        case "audio/mpga":
+            return "mpeg";
+        case "audio/ogg":
+        case "audio/oga":
+            return "ogg";
+        case "audio/flac":
+            return "flac";
+        case "audio/m4a":
+            return "m4a";
+        default:
+            throw new Error(`Unsupported audio format: ${mimeType}`);
+    }
+}
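Behavior of the new utility, derived from the switch above: MIME parameters are stripped before matching, matching is case-insensitive, and unknown types throw. Assuming the root package re-exports the util barrel:

```ts
import { getAudioFileExtension } from "modelfusion";

getAudioFileExtension("audio/mpeg");             // "mpeg"
getAudioFileExtension("audio/ogg; codecs=opus"); // "ogg" (parameters stripped)
getAudioFileExtension("video/mp4");              // throws: Unsupported audio format
```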
package/util/index.cjs
CHANGED
@@ -15,3 +15,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./cosineSimilarity.cjs"), exports);
+__exportStar(require("./getAudioFileExtension.cjs"), exports);
package/util/index.d.ts
CHANGED
package/util/index.js
CHANGED