@budibase/pro 3.28.1 → 3.28.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
 import AnthropicClient from "@anthropic-ai/sdk";
-import { LLMConfigOptions, LLMStreamChunk } from "@budibase/types";
+import { LLMConfigOptions } from "@budibase/types";
 import { LLMFullResponse } from "../../types/ai";
 import { LLMRequest } from "../llm";
 import { LLM } from "./base";
@@ -10,5 +10,4 @@ export declare class Anthropic extends LLM {
     firstTextBlock(message: AnthropicClient.Messages.Message): string | undefined;
     uploadFile(_data?: any, _filename?: string, _contentType?: string): Promise<string>;
     protected chatCompletion(request: LLMRequest): Promise<LLMFullResponse>;
-    protected chatCompletionStream(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
 }
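Every file in this diff makes the same change: the streaming chat surface (chatCompletionStream, chatStream, and the LLMStreamChunk type) is removed, leaving only the promise-based chatCompletion path. As a point of reference, the sketch below shows how an AsyncGenerator<LLMStreamChunk, void, unknown> like the removed one is typically consumed. The content field on the chunk is an assumed shape for illustration, not the real definition from @budibase/types.

// Sketch only: LLMStreamChunk's real shape lives in @budibase/types;
// a plain `content` string field is assumed here.
interface LLMStreamChunk {
  content: string;
}

// An AsyncGenerator<LLMStreamChunk, void, unknown> is drained with
// for await...of, one chunk per iteration until the generator returns.
async function drain(
  stream: AsyncGenerator<LLMStreamChunk, void, unknown>
): Promise<string> {
  let text = "";
  for await (const chunk of stream) {
    text += chunk.content;
  }
  return text;
}

// A stand-in generator so the sketch runs end to end.
async function* fakeStream(): AsyncGenerator<LLMStreamChunk, void, unknown> {
  yield { content: "Hello, " };
  yield { content: "world" };
}

drain(fakeStream()).then(console.log); // "Hello, world"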
package/dist/ai/models/base.d.ts CHANGED
@@ -1,4 +1,4 @@
-import { AIFieldMetadata, EnrichedBinding, LLMConfigOptions, LLMStreamChunk, Row, Snippet } from "@budibase/types";
+import { AIFieldMetadata, EnrichedBinding, LLMConfigOptions, Row, Snippet } from "@budibase/types";
 import { Readable } from "node:stream";
 import { LLMFullResponse, LLMPromptResponse } from "../../types/ai";
 import { LLMRequest } from "../llm";
@@ -11,11 +11,9 @@ export declare abstract class LLM {
     get apiKey(): string | undefined;
     get maxTokens(): number;
     protected abstract chatCompletion(request: LLMRequest): Promise<LLMFullResponse>;
-    protected abstract chatCompletionStream(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
     prompt(requestOrString: string | LLMRequest): Promise<LLMPromptResponse>;
     abstract uploadFile(data: Readable | Buffer, filename: string, contentType?: string): Promise<string>;
     chat(request: LLMRequest): Promise<LLMFullResponse>;
-    chatStream(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
     summarizeText(prompt: string): Promise<LLMPromptResponse>;
     generateCronExpression(prompt: string): Promise<LLMPromptResponse>;
     operation(schema: AIFieldMetadata, row: Row): Promise<LLMPromptResponse>;
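After this change, the abstract LLM class requires concrete providers to implement only chatCompletion and uploadFile. A minimal sketch of a conforming subclass follows; LLMRequest and LLMFullResponse are stubbed to assumed shapes, since the real ones are internal to @budibase/pro and not part of this diff.

import { Readable } from "node:stream";

// Assumed stand-in shapes for the internal types.
interface LLMRequest {
  prompt: string;
}
interface LLMFullResponse {
  message: string;
}

// The abstract contract, reduced to the members a provider must supply.
abstract class LLMSketch {
  protected abstract chatCompletion(request: LLMRequest): Promise<LLMFullResponse>;
  abstract uploadFile(data: Readable | Buffer, filename: string, contentType?: string): Promise<string>;

  // Mirrors the public chat() entry point from the declaration.
  chat(request: LLMRequest): Promise<LLMFullResponse> {
    return this.chatCompletion(request);
  }
}

class EchoLLM extends LLMSketch {
  protected async chatCompletion(request: LLMRequest): Promise<LLMFullResponse> {
    // A real provider would call its SDK here.
    return { message: request.prompt };
  }
  async uploadFile(_data: Readable | Buffer, filename: string): Promise<string> {
    // A real provider would push the file and return its id.
    return `file:${filename}`;
  }
}

new EchoLLM().chat({ prompt: "hello" }).then(r => console.log(r.message)); // "hello"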
@@ -1,4 +1,3 @@
-import { LLMStreamChunk } from "@budibase/types";
 import { Readable } from "node:stream";
 import { LLMFullResponse, LLMPromptResponse } from "../../types/ai";
 import { LLMRequest } from "../llm";
@@ -12,7 +11,4 @@ export declare class BudibaseAI extends LLM {
     protected chatCompletion(prompt: LLMRequest): Promise<LLMFullResponse>;
     protected chatCompletionCloud(prompt: LLMRequest): Promise<LLMFullResponse>;
     protected chatCompletionSelfHost(prompt: LLMRequest): Promise<LLMFullResponse>;
-    protected chatCompletionStream(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
-    protected chatCompletionStreamCloud(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
-    protected chatCompletionStreamSelfHost(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
 }
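BudibaseAI keeps its split between cloud and self-host completion paths, and the streaming variants of both are removed along with the base streaming methods. A .d.ts diff shows signatures only, so the dispatch below is a sketch under assumptions: the real cloud/self-host selection logic is not visible here, and the constructor flag is invented purely to illustrate the pattern the method names suggest.

// Assumed stand-in shapes for the internal types.
interface LLMRequest {
  prompt: string;
}
interface LLMFullResponse {
  message: string;
}

class BudibaseAISketch {
  // Hypothetical flag; the real class decides cloud vs self-host differently.
  constructor(private selfHost: boolean) {}

  chat(request: LLMRequest): Promise<LLMFullResponse> {
    // Public entry point picks the cloud or self-host code path.
    return this.selfHost
      ? this.chatCompletionSelfHost(request)
      : this.chatCompletionCloud(request);
  }

  protected async chatCompletionCloud(prompt: LLMRequest): Promise<LLMFullResponse> {
    return { message: `cloud: ${prompt.prompt}` };
  }

  protected async chatCompletionSelfHost(prompt: LLMRequest): Promise<LLMFullResponse> {
    return { message: `self-host: ${prompt.prompt}` };
  }
}

new BudibaseAISketch(true).chat({ prompt: "hi" }).then(r => console.log(r.message));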
@@ -1,4 +1,4 @@
-import { LLMConfigOptions, LLMStreamChunk, ResponseFormat } from "@budibase/types";
+import { LLMConfigOptions, ResponseFormat } from "@budibase/types";
 import { Readable } from "node:stream";
 import { default as openai, default as OpenAIClient } from "openai";
 import { LLMFullResponse } from "../../types/ai";
@@ -18,5 +18,4 @@ export declare class OpenAI extends LLM {
     protected getClient(opts: LLMConfigOptions): OpenAIClient;
     uploadFile(data: Readable | Buffer, filename: string, contentType?: string): Promise<string>;
     protected chatCompletion(request: LLMRequest): Promise<LLMFullResponse>;
-    protected chatCompletionStream(request: LLMRequest): AsyncGenerator<LLMStreamChunk, void, unknown>;
 }
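One line in the last file is worth a note: import { default as openai, default as OpenAIClient } from "openai"; is valid ES module syntax that binds the package's single default export under two local names. Both names refer to the same class, as the quick check below shows (assuming the openai package is installed).

import { default as openai, default as OpenAIClient } from "openai";
import assert from "node:assert";

// Two aliases, one binding: both names refer to the same default export.
assert.strictEqual(openai, OpenAIClient);
console.log("same export:", openai === OpenAIClient); // true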