@providerprotocol/ai 0.0.20 → 0.0.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.d.ts +184 -14
- package/dist/anthropic/index.js +210 -82
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-U3FZWV4U.js → chunk-EDENPF3E.js} +5 -2
- package/dist/{chunk-U3FZWV4U.js.map → chunk-EDENPF3E.js.map} +1 -1
- package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
- package/dist/chunk-M4BMM5IB.js.map +1 -0
- package/dist/{chunk-P5IRTEM5.js → chunk-Y3GBJNA2.js} +2 -2
- package/dist/{chunk-U4JJC2YX.js → chunk-Z4ILICF5.js} +2 -2
- package/dist/chunk-Z4ILICF5.js.map +1 -0
- package/dist/google/index.d.ts +16 -19
- package/dist/google/index.js +14 -36
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +2 -2
- package/dist/http/index.js +3 -3
- package/dist/index.d.ts +101 -38
- package/dist/index.js +69 -43
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +14 -16
- package/dist/ollama/index.js +5 -7
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +25 -133
- package/dist/openai/index.js +27 -81
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +28 -53
- package/dist/openrouter/index.js +20 -43
- package/dist/openrouter/index.js.map +1 -1
- package/dist/provider-DGQHYE6I.d.ts +1319 -0
- package/dist/proxy/index.d.ts +2 -3
- package/dist/proxy/index.js +5 -7
- package/dist/proxy/index.js.map +1 -1
- package/dist/{retry-DR7YRJDz.d.ts → retry-Pcs3hnbu.d.ts} +2 -2
- package/dist/{stream-DRHy6q1a.d.ts → stream-Di9acos2.d.ts} +1 -1
- package/dist/xai/index.d.ts +16 -88
- package/dist/xai/index.js +30 -58
- package/dist/xai/index.js.map +1 -1
- package/package.json +4 -1
- package/dist/chunk-MSR5P65T.js +0 -39
- package/dist/chunk-MSR5P65T.js.map +0 -1
- package/dist/chunk-U4JJC2YX.js.map +0 -1
- package/dist/chunk-UMKWXGO3.js.map +0 -1
- package/dist/content-DEl3z_W2.d.ts +0 -276
- package/dist/image-Dhq-Yuq4.d.ts +0 -456
- package/dist/provider-BBMBZuGn.d.ts +0 -570
- /package/dist/{chunk-P5IRTEM5.js.map → chunk-Y3GBJNA2.js.map} +0 -0
package/dist/index.d.ts
CHANGED
@@ -1,12 +1,8 @@
-import { M as Message, T as Turn, a as MessageType, b as MessageJSON, c as Tool, d as ToolUseStrategy, J as JSONSchema, S as StreamResult, A as AssistantMessage, e as TokenUsage, f as StreamEvent } from './stream-
-export { l as AfterCallResult, B as BeforeCallResult, E as EventDelta, g as JSONSchemaProperty, h as JSONSchemaPropertyType, r as MessageMetadata, s as MessageOptions, w as StreamEventType, i as ToolCall, m as ToolExecution, k as ToolMetadata, j as ToolResult, n as ToolResultMessage, U as UserMessage, v as aggregateUsage, F as contentBlockStart, G as contentBlockStop, x as createStreamResult, t as createTurn, u as emptyUsage, p as isAssistantMessage, q as isToolResultMessage, o as isUserMessage, C as messageStart, D as messageStop, y as textDelta, z as toolCallDelta } from './stream-
-import { U as UserContent, A as AssistantContent, C as ContentBlock } from './
-export {
-
-export { g as EmbeddingProvider, i as EmbeddingRequest, j as EmbeddingResponse, k as EmbeddingVector, e as ErrorCode, h as ImageProvider, K as KeyStrategy, M as Modality, f as ModelReference, R as RetryStrategy, U as UPPError } from './provider-BBMBZuGn.js';
-import { I as ImageOptions, a as ImageInstance } from './image-Dhq-Yuq4.js';
-export { B as BoundImageModel, G as GeneratedImage, b as Image, i as ImageCapabilities, d as ImageEditInput, k as ImageEditRequest, n as ImageHandler, c as ImageInput, o as ImageModelInput, m as ImageProviderStreamResult, j as ImageRequest, l as ImageResponse, f as ImageResult, g as ImageStreamEvent, h as ImageStreamResult, e as ImageUsage } from './image-Dhq-Yuq4.js';
-export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-DR7YRJDz.js';
+import { M as Message, T as Turn, a as MessageType, b as MessageJSON, c as Tool, d as ToolUseStrategy, J as JSONSchema, S as StreamResult, A as AssistantMessage, e as TokenUsage, f as StreamEvent } from './stream-Di9acos2.js';
+export { l as AfterCallResult, B as BeforeCallResult, E as EventDelta, g as JSONSchemaProperty, h as JSONSchemaPropertyType, r as MessageMetadata, s as MessageOptions, w as StreamEventType, i as ToolCall, m as ToolExecution, k as ToolMetadata, j as ToolResult, n as ToolResultMessage, U as UserMessage, v as aggregateUsage, F as contentBlockStart, G as contentBlockStop, x as createStreamResult, t as createTurn, u as emptyUsage, p as isAssistantMessage, q as isToolResultMessage, o as isUserMessage, C as messageStart, D as messageStop, y as textDelta, z as toolCallDelta } from './stream-Di9acos2.js';
+import { U as UserContent, A as AssistantContent, P as ProviderIdentity, a as ProviderConfig, C as ContentBlock, L as LLMProvider, E as EmbeddingInput, b as EmbeddingUsage, B as BoundEmbeddingModel, I as ImageOptions, c as ImageInstance, d as LLMHandler$1, e as EmbeddingHandler, f as ImageHandler, g as Provider, M as ModelReference } from './provider-DGQHYE6I.js';
+export { m as AudioBlock, n as BinaryBlock, _ as BoundImageModel, v as EmbeddingProvider, x as EmbeddingRequest, y as EmbeddingResponse, z as EmbeddingVector, j as ErrorCode, H as GeneratedImage, h as Image, l as ImageBlock, S as ImageCapabilities, F as ImageEditInput, X as ImageEditRequest, G as ImageGenerateOptions, $ as ImageHandler, D as ImageInput, a0 as ImageModelInput, w as ImageProvider, Z as ImageProviderStreamResult, W as ImageRequest, Y as ImageResponse, N as ImageResult, o as ImageSource, O as ImageStreamEvent, Q as ImageStreamResult, J as ImageUsage, K as KeyStrategy, k as Modality, R as RetryStrategy, T as TextBlock, i as UPPError, V as VideoBlock, r as isAudioBlock, u as isBinaryBlock, q as isImageBlock, p as isTextBlock, s as isVideoBlock, t as text } from './provider-DGQHYE6I.js';
+export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-Pcs3hnbu.js';
 
 /**
  * @fileoverview Thread class for managing conversation history.
@@ -228,16 +224,26 @@ declare class Thread {
  * Structural type for model input that accepts any ModelReference.
  * Uses structural typing to avoid generic variance issues with Provider generics.
  * The nested types use `unknown` to accept any provider parameter types.
+ *
+ * @remarks
+ * This type mirrors {@link ModelReference} while keeping provider options
+ * structurally compatible across providers.
+ *
+ * @see ModelReference
  */
 type ModelInput = {
   readonly modelId: string;
-  readonly provider:
-
-
-
-
-
-
+  readonly provider: ProviderIdentity;
+  /**
+   * Optional provider-specific configuration that gets merged into request config.
+   * Set when creating a model reference with provider-specific options.
+   */
+  readonly providerConfig?: Partial<ProviderConfig>;
+  /**
+   * The original options passed when creating this model reference.
+   * Used by providers with multiple LLM handlers to resolve the correct handler.
+   */
+  readonly options?: unknown;
 };
 /**
  * LLM capabilities declare what a provider's API supports.
@@ -273,7 +279,7 @@ interface LLMCapabilities {
   videoInput: boolean;
   /** Provider API supports audio input in messages */
   audioInput: boolean;
-  /** Provider API supports image generation output (via
+  /** Provider API supports image generation output (via image() or built-in tools) */
   imageOutput?: boolean;
 }
 /**
@@ -554,16 +560,16 @@ declare function llm<TParams = unknown>(options: LLMOptions<TParams>): LLMInstan
 /**
  * Structural type for embedding model input.
  * Uses structural typing to avoid generic variance issues with Provider generics.
+ *
+ * @remarks
+ * This type mirrors {@link ModelReference} while keeping provider options
+ * structurally compatible across providers.
+ *
+ * @see ModelReference
  */
 interface EmbeddingModelInput {
   readonly modelId: string;
-  readonly provider:
-    readonly name: string;
-    readonly version: string;
-    readonly modalities: {
-      embedding?: unknown;
-    };
-  };
+  readonly provider: ProviderIdentity;
 }
 /**
  * Options for creating an embedding instance with the embedding() function.
@@ -778,12 +784,36 @@ declare function embedding<TParams = unknown>(options: EmbeddingOptions<TParams>
  */
 declare function image<TParams = unknown>(options: ImageOptions<TParams>): ImageInstance<TParams>;
 
+/**
+ * @fileoverview Internal handler registry and resolver utilities.
+ *
+ * @module core/provider-handlers
+ */
+
+/**
+ * Resolver for dynamically selecting LLM handlers based on model options.
+ *
+ * Used by providers that support multiple API modes (e.g., OpenAI with responses/completions).
+ * The resolver eliminates shared mutable state by storing the mode on the ModelReference
+ * and resolving the correct handler at request time.
+ *
+ * @typeParam TOptions - Provider-specific options type
+ */
+interface LLMHandlerResolver<TOptions = unknown> {
+  /** Map of mode identifiers to their corresponding LLM handlers */
+  handlers: Record<string, LLMHandler$1>;
+  /** The default mode when options don't specify one */
+  defaultMode: string;
+  /** Function to extract the mode from provider options */
+  getMode: (options: TOptions | undefined) => string;
+}
+
 /**
  * @fileoverview Base provider interface and factory for the Universal Provider Protocol.
  *
  * This module provides the foundation for creating AI providers that conform to the
  * UPP specification. Providers are callable functions that create model references
- * and
+ * and register internal handlers for LLM, embedding, and image modalities.
  *
  * @module core/provider
  */
@@ -791,43 +821,63 @@ declare function image<TParams = unknown>(options: ImageOptions<TParams>): Image
 /**
  * Configuration options for creating a new provider.
  *
+ * @typeParam TOptions - Provider-specific options type
+ *
  * @example
  * ```typescript
+ * // Simple provider with single handler
 * const options: CreateProviderOptions = {
 *   name: 'my-provider',
 *   version: '1.0.0',
- *
+ *   handlers: {
 *     llm: createLLMHandler(),
 *     embedding: createEmbeddingHandler(),
 *   },
 * };
+ *
+ * // Provider with multiple LLM handlers (API modes)
+ * const options: CreateProviderOptions<OpenAIOptions> = {
+ *   name: 'openai',
+ *   version: '1.0.0',
+ *   handlers: {
+ *     llm: {
+ *       handlers: { responses: handler1, completions: handler2 },
+ *       defaultMode: 'responses',
+ *       getMode: (opts) => opts?.api ?? 'responses',
+ *     },
+ *   },
+ * };
 * ```
 */
-interface CreateProviderOptions {
+interface CreateProviderOptions<TOptions = unknown> {
   /** Unique identifier for the provider */
   name: string;
   /** Semantic version string for the provider implementation */
   version: string;
   /** Handlers for supported modalities (LLM, embedding, image generation) */
-
-  /** Handler for language model completions */
-  llm?: LLMHandler$1
+  handlers: {
+    /** Handler for language model completions, or resolver for multi-handler providers */
+    llm?: LLMHandler$1 | LLMHandlerResolver<TOptions>;
     /** Handler for text embeddings */
     embedding?: EmbeddingHandler;
     /** Handler for image generation */
     image?: ImageHandler;
   };
+  /**
+   * Custom function to create model references from options.
+   * Use this to map provider options to providerConfig (e.g., betas to headers).
+   */
+  createModelReference?: (modelId: string, options: TOptions | undefined, provider: Provider<TOptions>) => ModelReference<TOptions>;
 }
 /**
- * Creates a provider factory function with
+ * Creates a provider factory function with registered modality handlers.
 *
 * The returned provider is a callable function that creates model references
- * when invoked with a model ID. It
- * `modalities` properties for introspection.
+ * when invoked with a model ID. It exposes `name` and `version` metadata.
 *
 * @typeParam TOptions - Provider-specific options type (defaults to unknown)
 * @param options - Provider configuration including name, version, and handlers
- * @returns A callable Provider with
+ * @returns A callable Provider with handlers registered internally
 *
 * @example
 * ```typescript
@@ -835,7 +885,7 @@ interface CreateProviderOptions {
 * const anthropic = createProvider({
 *   name: 'anthropic',
 *   version: '1.0.0',
- *
+ *   handlers: { llm: createLLMHandler() },
 * });
 *
 * // Use the provider to create a model reference
@@ -846,11 +896,24 @@ interface CreateProviderOptions {
 * const myProvider = createProvider<MyOptions>({
 *   name: 'my-provider',
 *   version: '1.0.0',
- *
+ *   handlers: { llm: handler },
+ * });
+ *
+ * // Provider with multiple LLM handlers (API modes)
+ * const openai = createProvider<OpenAIOptions>({
+ *   name: 'openai',
+ *   version: '1.0.0',
+ *   handlers: {
+ *     llm: {
+ *       handlers: { responses: responsesHandler, completions: completionsHandler },
+ *       defaultMode: 'responses',
+ *       getMode: (opts) => opts?.api ?? 'responses',
+ *     },
+ *   },
 * });
 * ```
 */
-declare function createProvider<TOptions = unknown>(options: CreateProviderOptions): Provider<TOptions>;
+declare function createProvider<TOptions = unknown>(options: CreateProviderOptions<TOptions>): Provider<TOptions>;
 
 /**
  * @fileoverview Unified Provider Protocol (UPP) - A unified interface for AI model inference
@@ -910,4 +973,4 @@ declare const ai: {
   image: typeof image;
 };
 
-export { AssistantContent, AssistantMessage, BoundEmbeddingModel, type BoundLLMModel, ContentBlock, type EmbedOptions, type Embedding, EmbeddingHandler, EmbeddingInput, type EmbeddingInstance, type EmbeddingModelInput, type EmbeddingOptions, type EmbeddingProgress, type EmbeddingResult, type EmbeddingStream, EmbeddingUsage, ImageInstance, ImageOptions, type InferenceInput, JSONSchema, type LLMCapabilities, type LLMHandler, type LLMInstance, type LLMOptions, LLMProvider, type LLMRequest, type LLMResponse, type LLMStreamResult, Message, MessageJSON, MessageType, Provider, ProviderConfig, StreamEvent, StreamResult, Thread, type ThreadJSON, TokenUsage, Tool, ToolUseStrategy, Turn, UserContent, ai, createProvider, embedding, image, llm };
+export { AssistantContent, AssistantMessage, BoundEmbeddingModel, type BoundLLMModel, ContentBlock, type EmbedOptions, type Embedding, EmbeddingHandler, EmbeddingInput, type EmbeddingInstance, type EmbeddingModelInput, type EmbeddingOptions, type EmbeddingProgress, type EmbeddingResult, type EmbeddingStream, EmbeddingUsage, ImageInstance, ImageOptions, type InferenceInput, JSONSchema, type LLMCapabilities, type LLMHandler, type LLMInstance, type LLMOptions, LLMProvider, type LLMRequest, type LLMResponse, type LLMStreamResult, Message, MessageJSON, MessageType, ModelReference, Provider, ProviderConfig, ProviderIdentity, StreamEvent, StreamResult, Thread, type ThreadJSON, TokenUsage, Tool, ToolUseStrategy, Turn, UserContent, ai, createProvider, embedding, image, llm };
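
Taken together, these declaration changes replace the old public `modalities` surface with handlers registered internally at createProvider() time, plus an optional per-mode resolver and a `createModelReference` hook. A minimal TypeScript sketch of how this surface appears to be used, based only on the declarations above — the `api` option, the placeholder handlers, and the model id are hypothetical, and the `provider(modelId, options)` call shape is inferred from `createModelReference`:

    import { createProvider, llm } from '@providerprotocol/ai';

    // Hypothetical provider options; only the shape matters to getMode().
    type MyOptions = { api?: 'responses' | 'completions' };

    // Placeholders standing in for real LLM handler implementations.
    declare const responsesHandler: any;
    declare const completionsHandler: any;

    const myProvider = createProvider<MyOptions>({
      name: 'my-provider',
      version: '1.0.0',
      handlers: {
        llm: {
          handlers: { responses: responsesHandler, completions: completionsHandler },
          defaultMode: 'responses',
          getMode: (opts) => opts?.api ?? 'responses',
        },
      },
    });

    // The options are stored on the ModelReference; llm() resolves the
    // matching handler from them at request time instead of shared state.
    const chat = llm({ model: myProvider('some-model', { api: 'completions' }) });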
package/dist/index.js
CHANGED
@@ -3,9 +3,6 @@ import {
   createTurn,
   emptyUsage
 } from "./chunk-SKY2JLA7.js";
-import {
-  createProvider
-} from "./chunk-MSR5P65T.js";
 import {
   Image
 } from "./chunk-WAKD3OO5.js";
@@ -14,27 +11,31 @@ import {
   Message,
   ToolResultMessage,
   UserMessage,
+  createProvider,
   generateId,
   isAssistantMessage,
   isToolResultMessage,
-  isUserMessage
-
+  isUserMessage,
+  resolveEmbeddingHandler,
+  resolveImageHandler,
+  resolveLLMHandler
+} from "./chunk-M4BMM5IB.js";
 import {
   ExponentialBackoff,
   LinearBackoff,
   NoRetry,
   RetryAfterStrategy,
   TokenBucket
-} from "./chunk-
+} from "./chunk-Z4ILICF5.js";
 import "./chunk-Z7RBRCRN.js";
 import {
   DynamicKey,
   RoundRobinKeys,
   WeightedKeys
-} from "./chunk-
+} from "./chunk-Y3GBJNA2.js";
 import {
   UPPError
-} from "./chunk-
+} from "./chunk-EDENPF3E.js";
 
 // src/types/stream.ts
 function createStreamResult(generator, turnPromise, abortController) {
@@ -108,9 +109,19 @@ function toolExecutionEnd(toolCallId, toolName, result, isError, timestamp, inde
 // src/core/llm.ts
 var DEFAULT_MAX_ITERATIONS = 10;
 function llm(options) {
-  const { model: modelRef, config = {}, params, system, tools, toolStrategy, structure } = options;
+  const { model: modelRef, config: explicitConfig = {}, params, system, tools, toolStrategy, structure } = options;
+  const providerConfig = modelRef.providerConfig ?? {};
+  const config = {
+    ...providerConfig,
+    ...explicitConfig,
+    headers: {
+      ...providerConfig.headers,
+      ...explicitConfig.headers
+    }
+  };
   const provider = modelRef.provider;
-
+  const llmHandler = resolveLLMHandler(provider, modelRef.options);
+  if (!llmHandler) {
     throw new UPPError(
       `Provider '${provider.name}' does not support LLM modality`,
       "INVALID_REQUEST",
@@ -118,7 +129,6 @@ function llm(options) {
       "llm"
     );
   }
-  const llmHandler = provider.modalities.llm;
   const boundModel = llmHandler.bind(modelRef.modelId);
   const capabilities = boundModel.capabilities;
   if (structure && !capabilities.structuredOutput) {
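
The merge added above layers the explicit `config` passed to llm() over the `providerConfig` carried on the model reference, with `headers` shallow-merged so explicit headers win key-by-key while provider-supplied headers are kept. A standalone sketch of the resulting precedence (field names and values are illustrative only, not actual ProviderConfig fields):

    // Same spread order as the llm() change above, with made-up values.
    const providerConfig = { timeout: 30_000, headers: { 'x-beta': 'tools-2024' } };
    const explicitConfig = { timeout: 10_000, headers: { 'x-request-id': 'req_1' } };

    const config = {
      ...providerConfig,
      ...explicitConfig,
      headers: { ...providerConfig.headers, ...explicitConfig.headers },
    };

    // config.timeout === 10_000 (explicit config wins)
    // config.headers => { 'x-beta': 'tools-2024', 'x-request-id': 'req_1' } (merged)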
@@ -309,6 +319,9 @@ function executeStream(model, config, system, params, tools, toolStrategy, struc
   const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
   async function* generateStream() {
     try {
+      if (abortController.signal.aborted) {
+        throw new UPPError("Stream cancelled", "CANCELLED", model.provider.name, "llm");
+      }
       while (cycles < maxIterations + 1) {
         cycles++;
         const request = {
@@ -539,7 +552,8 @@ function validateMediaCapabilities(messages, capabilities, providerName) {
 function embedding(options) {
   const { model: modelRef, config = {}, params } = options;
   const provider = modelRef.provider;
-
+  const handler = resolveEmbeddingHandler(provider);
+  if (!handler) {
     throw new UPPError(
       `Provider '${provider.name}' does not support embedding modality`,
       "INVALID_REQUEST",
@@ -547,20 +561,19 @@ function embedding(options) {
       "embedding"
     );
   }
-  const handler = provider.modalities.embedding;
   const boundModel = handler.bind(modelRef.modelId);
-
+  function embed(input, embedOptions) {
+    const inputs = Array.isArray(input) ? input : [input];
+    if (embedOptions?.chunked) {
+      return createChunkedStream(boundModel, inputs, params, config, embedOptions);
+    }
+    return executeEmbed(boundModel, inputs, params, config, embedOptions?.signal);
+  }
+  return {
     model: boundModel,
     params,
-    embed
-    const inputs = Array.isArray(input) ? input : [input];
-    if (embedOptions?.chunked) {
-      return createChunkedStream(boundModel, inputs, params, config, embedOptions);
-    }
-    return executeEmbed(boundModel, inputs, params, config, embedOptions?.signal);
-    }
+    embed
   };
-  return instance;
 }
 async function executeEmbed(model, inputs, params, config, signal) {
   const response = await model.embed({
@@ -569,12 +582,12 @@ async function executeEmbed(model, inputs, params, config, signal) {
     config: config ?? {},
     signal
   });
-  return normalizeResponse(response);
+  return normalizeResponse(response, model.provider.name);
 }
-function normalizeResponse(response) {
+function normalizeResponse(response, providerName) {
   return {
     embeddings: response.embeddings.map((vec, i) => {
-      const vector = normalizeVector(vec.vector);
+      const vector = normalizeVector(vec.vector, providerName);
       return {
         vector,
         dimensions: vector.length,
@@ -587,20 +600,32 @@ function normalizeResponse(response) {
     metadata: response.metadata
   };
 }
-function normalizeVector(vector) {
+function normalizeVector(vector, providerName) {
   if (Array.isArray(vector)) {
     return vector;
   }
-  return decodeBase64(vector);
+  return decodeBase64(vector, providerName);
 }
-function decodeBase64(b64) {
-
-
-
-
+function decodeBase64(b64, providerName) {
+  try {
+    const binary = atob(b64);
+    const bytes = new Uint8Array(binary.length);
+    for (let i = 0; i < binary.length; i++) {
+      bytes[i] = binary.charCodeAt(i);
+    }
+    const floats = new Float32Array(bytes.buffer);
+    return Array.from(floats);
+  } catch (error) {
+    const cause = error instanceof Error ? error : new Error("Failed to decode base64 vector");
+    throw new UPPError(
+      "Invalid base64 embedding vector",
+      "INVALID_RESPONSE",
+      providerName,
+      "embedding",
+      void 0,
+      cause
+    );
   }
-  const floats = new Float32Array(bytes.buffer);
-  return Array.from(floats);
 }
 function createChunkedStream(model, inputs, params, config, options) {
   const abortController = new AbortController();
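
The new decodeBase64 reads a base64 string as the raw bytes of a Float32Array and now reports decode failures as an INVALID_RESPONSE UPPError. A standalone round-trip showing the encoding it expects (not part of the package; assumes a runtime with btoa):

    // Encode a Float32Array the way decodeBase64 above will read it back.
    const floats = new Float32Array([0.25, -1.5, 3]);
    const bytes = new Uint8Array(floats.buffer);
    let binary = '';
    for (const b of bytes) binary += String.fromCharCode(b);
    const b64 = btoa(binary);
    // decodeBase64(b64, 'provider-name') yields [0.25, -1.5, 3]; these values
    // are exactly representable in float32, so no precision is lost.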
@@ -644,7 +669,7 @@ function createChunkedStream(model, inputs, params, config, options) {
       const batchEmbeddings = [];
       for (const response of responses) {
         for (const vec of response.embeddings) {
-          const vector = normalizeVector(vec.vector);
+          const vector = normalizeVector(vec.vector, model.provider.name);
           const emb = {
             vector,
             dimensions: vector.length,
@@ -685,7 +710,8 @@ function createChunkedStream(model, inputs, params, config, options) {
 function image(options) {
   const { model: modelRef, config = {}, params } = options;
   const provider = modelRef.provider;
-
+  const imageHandler = resolveImageHandler(provider);
+  if (!imageHandler) {
     throw new UPPError(
       `Provider '${provider.name}' does not support image modality`,
       "INVALID_REQUEST",
@@ -693,19 +719,19 @@ function image(options) {
       "image"
     );
   }
-  const imageHandler = provider.modalities.image;
   const boundModel = imageHandler.bind(modelRef.modelId);
   const capabilities = boundModel.capabilities;
   const instance = {
     model: boundModel,
     params,
     capabilities,
-    async generate(input) {
+    async generate(input, options2) {
       const prompt = normalizeInput(input);
       const response = await boundModel.generate({
         prompt,
         params,
-        config
+        config,
+        signal: options2?.signal
       });
       return {
         images: response.images,
@@ -715,11 +741,11 @@ function image(options) {
    }
  };
  if (capabilities.streaming && boundModel.stream) {
-    const
+    const stream = boundModel.stream;
    instance.stream = function(input) {
      const prompt = normalizeInput(input);
      const abortController = new AbortController();
-      const providerStream =
+      const providerStream = stream({
        prompt,
        params,
        config,
@@ -738,9 +764,9 @@ function image(options) {
    };
  }
  if (capabilities.edit && boundModel.edit) {
-    const
+    const edit = boundModel.edit;
    instance.edit = async function(input) {
-      const response = await
+      const response = await edit({
        image: input.image,
        mask: input.mask,
        prompt: input.prompt,