@jerome-benoit/sap-ai-provider-v2 4.1.2-rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +187 -0
- package/README.md +777 -0
- package/dist/index.cjs +31868 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +972 -0
- package/dist/index.d.ts +972 -0
- package/dist/index.js +31865 -0
- package/dist/index.js.map +1 -0
- package/package.json +109 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,972 @@
import { EmbeddingModelV2, SharedV2ProviderOptions, SharedV2ProviderMetadata, SharedV2Headers, LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2ResponseMetadata, LanguageModelV2Usage, LanguageModelV2CallWarning, LanguageModelV2StreamPart } from '@ai-sdk/provider';
import { DeploymentIdConfig, ResourceGroupConfig } from '@sap-ai-sdk/ai-api/internal.js';
import { HttpDestinationOrFetchOptions } from '@sap-cloud-sdk/connectivity';
import { EmbeddingModelParams, FilteringModule, GroundingModule, MaskingModule, ChatCompletionTool, TranslationModule, ChatModel } from '@sap-ai-sdk/orchestration';
export { AssistantChatMessage, ChatCompletionRequest, ChatCompletionTool, ChatMessage, DeveloperChatMessage, DocumentTranslationApplyToSelector, FilteringModule, FunctionObject, GroundingModule, LlmModelDetails, LlmModelParams, MaskingModule, OrchestrationClient, OrchestrationConfigRef, OrchestrationEmbeddingClient, OrchestrationErrorResponse, OrchestrationModuleConfig, OrchestrationResponse, OrchestrationStreamChunkResponse, OrchestrationStreamResponse, PromptTemplatingModule, SystemChatMessage, ToolChatMessage, TranslationApplyToCategory, TranslationInputParameters, TranslationModule, TranslationOutputParameters, TranslationTargetLanguage, UserChatMessage, buildAzureContentSafetyFilter, buildDocumentGroundingConfig, buildDpiMaskingProvider, buildLlamaGuard38BFilter, buildTranslationConfig, isConfigReference } from '@sap-ai-sdk/orchestration';
import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
import { InferSchema } from '@ai-sdk/provider-utils';

/**
 * Model ID type for SAP AI embedding models.
 *
 * Common embedding models available in SAP AI Core:
 * - `text-embedding-ada-002` - OpenAI Ada v2
 * - `text-embedding-3-small` - OpenAI v3 small
 * - `text-embedding-3-large` - OpenAI v3 large
 */
type SAPAIEmbeddingModelId = string;
/**
 * Settings for the SAP AI Embedding Model.
 */
interface SAPAIEmbeddingSettings {
    /**
     * Maximum number of embeddings per API call.
     * @default 2048
     */
    maxEmbeddingsPerCall?: number;
    /**
     * Additional model parameters passed to the embedding API.
     */
    modelParams?: EmbeddingModelParams;
    /**
     * Embedding task type.
     * @default 'text'
     */
    type?: "document" | "query" | "text";
}

/**
 * SAP AI Embedding Model V2 implementation.
 *
 * This module provides an EmbeddingModelV2 facade that wraps the internal
 * EmbeddingModelV3 implementation and transforms the output to V2 format.
 *
 * This approach allows us to:
 * - Reuse all SAP AI Core business logic from the V3 implementation
 * - Present a V2 API to users (compatible with AI SDK 5.x)
 * - Keep the upstream V3 code unchanged for easy git merges
 * @module sap-ai-embedding-model-v2
 */

/**
 * Internal configuration for the SAP AI Embedding Model.
 * @internal
 */
interface SAPAIEmbeddingConfig {
    deploymentConfig: DeploymentIdConfig | ResourceGroupConfig;
    destination?: HttpDestinationOrFetchOptions;
    provider: string;
}
/**
 * SAP AI Core Embedding Model implementing the Vercel AI SDK EmbeddingModelV2 interface.
 *
 * This class implements the AI SDK's `EmbeddingModelV2` interface (for AI SDK 5.x),
 * providing a bridge between AI SDK 5.x and SAP AI Core's Orchestration API
 * using the official SAP AI SDK (@sap-ai-sdk/orchestration).
 *
 * **Architecture:**
 * This is a thin facade that delegates to the internal V3 implementation
 * (SAPAIEmbeddingModel) and transforms the output to V2 format.
 *
 * **Features:**
 * - Text embedding generation (single and batch)
 * - Multiple embedding types (query, document)
 * - Parallel batch processing
 *
 * **Model Support:**
 * - Azure OpenAI embeddings (text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large)
 * - Anthropic embeddings (if available through SAP AI Core)
 * - Other embedding models available in SAP AI Core
 * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/embeddings Vercel AI SDK Embeddings}
 * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/orchestration SAP AI Core Orchestration}
 * @example
 * ```typescript
 * // Create via provider
 * const provider = createSAPAIProvider();
 *
 * // Single embedding
 * const { embedding } = await embed({
 *   model: provider.textEmbedding('text-embedding-ada-002'),
 *   value: 'Hello, world!'
 * });
 *
 * // Multiple embeddings
 * const { embeddings } = await embedMany({
 *   model: provider.textEmbedding('text-embedding-3-small'),
 *   values: ['Hello', 'World', 'AI']
 * });
 * ```
 */
declare class SAPAIEmbeddingModelV2 implements EmbeddingModelV2<string> {
    readonly maxEmbeddingsPerCall: number | undefined;
    readonly modelId: string;
    readonly provider: string;
    readonly specificationVersion: "v2";
    readonly supportsParallelCalls: boolean;
    /** Internal V3 model instance that handles all SAP AI Core logic */
    private readonly v3Model;
    /**
     * Creates a new SAP AI Embedding Model V2 instance.
     * @internal
     * @param modelId - The embedding model identifier (e.g., 'text-embedding-ada-002')
     * @param settings - Model-specific configuration settings
     * @param config - Internal configuration (deployment config, destination, etc.)
     */
    constructor(modelId: string, settings: SAPAIEmbeddingSettings, config: SAPAIEmbeddingConfig);
    /**
     * Generates embeddings for the given text values.
     *
     * Implements `EmbeddingModelV2.doEmbed`, delegating to the internal V3 implementation
     * and transforming the response to V2 format.
     *
     * The main differences between V2 and V3:
     * - V3 includes a `warnings` array that needs to be converted
     * - Provider metadata and headers use different type definitions
     * @param options - Embedding options including values and headers
     * @param options.values - Array of text values to embed
     * @param options.abortSignal - Optional abort signal for cancelling the operation
     * @param options.providerOptions - Optional provider-specific options
     * @param options.headers - Optional HTTP headers
     * @returns Promise resolving to embeddings and metadata in V2 format
     */
    doEmbed(options: {
        abortSignal?: AbortSignal;
        headers?: Record<string, string | undefined>;
        providerOptions?: SharedV2ProviderOptions;
        values: string[];
    }): Promise<{
        embeddings: number[][];
        providerMetadata?: SharedV2ProviderMetadata;
        response?: {
            body?: unknown;
            headers?: SharedV2Headers;
        };
        usage?: {
            tokens: number;
        };
    }>;
}

/**
 * The provider identifier used for provider options.
 * Use this key in `providerOptions` to pass SAP AI-specific options.
 * @example
 * ```typescript
 * providerOptions: {
 *   [SAP_AI_PROVIDER_NAME]: { includeReasoning: true }
 * }
 * ```
 */
declare const SAP_AI_PROVIDER_NAME: "sap-ai";
/**
 * Zod schema for SAP AI language model provider options.
 *
 * These options can be passed per-call via `providerOptions['sap-ai']` to override
 * constructor settings or provide request-specific configuration.
 * @example
 * ```typescript
 * const result = await generateText({
 *   model: provider('gpt-4o'),
 *   prompt: 'Hello',
 *   providerOptions: {
 *     'sap-ai': {
 *       includeReasoning: true,
 *       modelParams: { temperature: 0.7, maxTokens: 1000 }
 *     }
 *   }
 * });
 * ```
 */
declare const sapAILanguageModelProviderOptions: _ai_sdk_provider_utils.LazySchema<{
    includeReasoning?: boolean | undefined;
    modelParams?: {
        [x: string]: unknown;
        frequencyPenalty?: number | undefined;
        maxTokens?: number | undefined;
        n?: number | undefined;
        parallel_tool_calls?: boolean | undefined;
        presencePenalty?: number | undefined;
        temperature?: number | undefined;
        topP?: number | undefined;
    } | undefined;
}>;
/**
 * TypeScript type for SAP AI language model provider options.
 * Inferred from the Zod schema for type safety.
 */
type SAPAILanguageModelProviderOptions = InferSchema<typeof sapAILanguageModelProviderOptions>;
/**
 * Zod schema for SAP AI embedding model provider options.
 *
 * These options can be passed per-call via `providerOptions['sap-ai']` to override
 * constructor settings or provide request-specific configuration.
 * @example
 * ```typescript
 * const { embedding } = await embed({
 *   model: provider.embedding('text-embedding-ada-002'),
 *   value: 'Hello, world!',
 *   providerOptions: {
 *     'sap-ai': {
 *       type: 'query'
 *     }
 *   }
 * });
 * ```
 */
declare const sapAIEmbeddingProviderOptions: _ai_sdk_provider_utils.LazySchema<{
    modelParams?: {
        [x: string]: unknown;
        dimensions?: number | undefined;
        encoding_format?: "base64" | "binary" | "float" | undefined;
        normalize?: boolean | undefined;
    } | undefined;
    type?: "document" | "query" | "text" | undefined;
}>;
/**
 * TypeScript type for SAP AI embedding model provider options.
 * Inferred from the Zod schema for type safety.
 */
type SAPAIEmbeddingProviderOptions = InferSchema<typeof sapAIEmbeddingProviderOptions>;

/**
 * Supported model IDs in SAP AI Core.
 *
 * These models are available through the SAP AI Core Orchestration service.
 * **Note:** The models listed here are representative examples. Actual model availability
 * depends on your SAP AI Core tenant configuration, region, and subscription.
 * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/models-and-scenarios SAP AI Core Models Documentation}
 *
 * **Azure OpenAI Models:**
 * - gpt-4o, gpt-4o-mini
 * - gpt-4.1, gpt-4.1-mini, gpt-4.1-nano
 * - o1, o3, o3-mini, o4-mini
 *
 * **Google Vertex AI Models:**
 * - gemini-2.0-flash, gemini-2.0-flash-lite
 * - gemini-2.5-flash, gemini-2.5-pro
 * - ⚠️ **Limitation:** Gemini models support only 1 tool per request
 *
 * **AWS Bedrock Models:**
 * - anthropic--claude-3-haiku, anthropic--claude-3-sonnet, anthropic--claude-3-opus
 * - anthropic--claude-3.5-sonnet, anthropic--claude-3.7-sonnet
 * - anthropic--claude-4-sonnet, anthropic--claude-4-opus
 * - amazon--nova-pro, amazon--nova-lite, amazon--nova-micro, amazon--nova-premier
 *
 * **AI Core Open Source Models:**
 * - mistralai--mistral-large-instruct, mistralai--mistral-medium-instruct, mistralai--mistral-small-instruct
 * - meta--llama3.1-70b-instruct
 * - cohere--command-a-reasoning
 */
type SAPAIModelId = ChatModel;
/**
 * Settings for configuring SAP AI Core model behavior.
 *
 * These settings control model parameters, data masking, content filtering,
 * and tool usage. Settings can be provided at provider-level (defaults) or
 * per-model call (overrides).
 * @example
 * **Basic usage with model parameters**
 * ```typescript
 * const model = provider('gpt-4o', {
 *   modelParams: {
 *     temperature: 0.7,
 *     maxTokens: 2000
 *   }
 * });
 * ```
 * @example
 * **With data masking (DPI)**
 * ```typescript
 * import { buildDpiMaskingProvider } from '@mymediset/sap-ai-provider';
 *
 * const model = provider('gpt-4o', {
 *   masking: {
 *     masking_providers: [
 *       buildDpiMaskingProvider({
 *         method: 'anonymization',
 *         entities: ['profile-email', 'profile-person']
 *       })
 *     ]
 *   }
 * });
 * ```
 * @example
 * **With content filtering**
 * ```typescript
 * import { buildAzureContentSafetyFilter } from '@mymediset/sap-ai-provider';
 *
 * const model = provider('gpt-4o', {
 *   filtering: {
 *     input: {
 *       filters: [buildAzureContentSafetyFilter('input', { hate: 'ALLOW_SAFE' })]
 *     }
 *   }
 * });
 * ```
 */
interface SAPAISettings {
    /**
     * Filtering configuration for input and output content safety.
     * Supports Azure Content Safety and Llama Guard filters.
     * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/content-filtering SAP Content Filtering Documentation}
     * @example
     * ```typescript
     * import { buildAzureContentSafetyFilter } from '@sap-ai-sdk/orchestration';
     *
     * const model = provider('gpt-4o', {
     *   filtering: {
     *     input: {
     *       filters: [
     *         buildAzureContentSafetyFilter('input', {
     *           hate: 'ALLOW_SAFE',
     *           violence: 'ALLOW_SAFE_LOW_MEDIUM'
     *         })
     *       ]
     *     }
     *   }
     * });
     * ```
     */
    filtering?: FilteringModule;
    /**
     * Grounding module configuration for document-based retrieval (RAG).
     * Enables retrieval-augmented generation using SAP Document Grounding Service.
     *
     * Use `buildDocumentGroundingConfig()` to create the configuration.
     * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/document-grounding SAP Document Grounding Documentation}
     * @example
     * ```typescript
     * import { buildDocumentGroundingConfig } from '@mymediset/sap-ai-provider';
     *
     * const model = provider('gpt-4o', {
     *   grounding: buildDocumentGroundingConfig({
     *     filters: [
     *       {
     *         id: 'my-vector-store',
     *         data_repository_type: 'vector',
     *         data_repositories: ['document-repo-1'],
     *         chunk_overlap: 50
     *       }
     *     ],
     *     placeholders: {
     *       input: ['?question'],
     *       output: 'groundingOutput'
     *     }
     *   })
     * });
     * ```
     */
    grounding?: GroundingModule;
    /**
     * Whether to include assistant reasoning parts in the SAP prompt conversion.
     *
     * Reasoning parts contain internal model chain-of-thought reasoning. When disabled,
     * only the final response content is forwarded. Enable for debugging/analysis;
     * disable for production applications and user-facing chatbots.
     * @default false
     * @example
     * ```typescript
     * const debugModel = provider('gpt-4o', { includeReasoning: true });
     * const prodModel = provider('gpt-4o'); // includeReasoning defaults to false
     * ```
     */
    includeReasoning?: boolean;
    /**
     * Masking configuration for SAP AI Core orchestration.
     * When provided, sensitive information in prompts can be anonymized or
     * pseudonymized by SAP Data Privacy Integration (DPI).
     * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/data-privacy-integration SAP DPI Documentation}
     * @example
     * ```typescript
     * import { buildDpiMaskingProvider } from '@sap-ai-sdk/orchestration';
     *
     * const model = provider('gpt-4o', {
     *   masking: {
     *     masking_providers: [
     *       buildDpiMaskingProvider({
     *         method: 'anonymization',
     *         entities: ['profile-email', 'profile-phone']
     *       })
     *     ]
     *   }
     * });
     * ```
     */
    masking?: MaskingModule;
    /**
     * Model generation parameters that control the output.
     */
    modelParams?: {
        /**
         * Frequency penalty between -2.0 and 2.0.
         * Positive values penalize tokens based on their frequency.
         * If not specified, the model's default is used (typically 0).
         */
        frequencyPenalty?: number;
        /**
         * Maximum number of tokens to generate.
         * Higher values allow for longer responses but increase latency and cost.
         */
        maxTokens?: number;
        /**
         * Number of completions to generate.
         * Multiple completions provide alternative responses.
         *
         * Note: Not supported by Amazon and Anthropic models. When used with these
         * models, the parameter is silently omitted from the request.
         * If not specified, typically defaults to 1 on the model side.
         */
        n?: number;
        /**
         * Whether to enable parallel tool calls.
         * When enabled, the model can call multiple tools in parallel.
         *
         * Note: This uses the SAP/OpenAI-style key `parallel_tool_calls`.
         */
        parallel_tool_calls?: boolean;
        /**
         * Presence penalty between -2.0 and 2.0.
         * Positive values penalize tokens that have appeared in the text.
         * If not specified, the model's default is used (typically 0).
         */
        presencePenalty?: number;
        /**
         * Sampling temperature between 0 and 2.
         * Higher values make output more random, lower values more deterministic.
         * If not specified, the model's default temperature is used.
         */
        temperature?: number;
        /**
         * Nucleus sampling parameter between 0 and 1.
         * Controls diversity via cumulative probability cutoff.
         * If not specified, the model's default topP is used (typically 1).
         */
        topP?: number;
    };
    /**
     * Specific version of the model to use.
     * If not provided, the latest version will be used.
     */
    modelVersion?: string;
    /**
     * Response format for templating prompt (OpenAI-compatible)
     * Allows specifying structured output formats
     * @example
     * ```typescript
     * const model = provider('gpt-4o', {
     *   responseFormat: {
     *     type: 'json_schema',
     *     json_schema: {
     *       name: 'response',
     *       schema: { type: 'object', properties: { answer: { type: 'string' } } }
     *     }
     *   }
     * });
     * ```
     */
    responseFormat?: {
        json_schema: {
            description?: string;
            name: string;
            schema?: unknown;
            strict?: boolean | null;
        };
        type: "json_schema";
    } | {
        type: "json_object";
    } | {
        type: "text";
    };
    /**
     * Tool definitions in SAP AI SDK format
     *
     * Use this to pass tools directly with proper JSON Schema definitions.
     * This bypasses the AI SDK's Zod conversion which may have issues.
     *
     * Note: This should be used in conjunction with AI SDK's tool handling
     * to provide the actual tool implementations (execute functions).
     * @example
     * ```typescript
     * const model = provider('gpt-4o', {
     *   tools: [
     *     {
     *       type: 'function',
     *       function: {
     *         name: 'get_weather',
     *         description: 'Get weather for a location',
     *         parameters: {
     *           type: 'object',
     *           properties: {
     *             location: { type: 'string', description: 'City name' }
     *           },
     *           required: ['location']
     *         }
     *       }
     *     }
     *   ]
     * });
     * ```
     */
    tools?: ChatCompletionTool[];
    /**
     * Translation module configuration for input/output translation.
     * Enables automatic translation using SAP Document Translation service.
     *
     * Use `buildTranslationConfig()` to create input/output configurations.
     * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/translation SAP Translation Documentation}
     * @example
     * ```typescript
     * import { buildTranslationConfig } from '@mymediset/sap-ai-provider';
     *
     * const model = provider('gpt-4o', {
     *   translation: {
     *     input: buildTranslationConfig('input', {
     *       sourceLanguage: 'de-DE',
     *       targetLanguage: 'en-US'
     *     }),
     *     output: buildTranslationConfig('output', {
     *       targetLanguage: 'de-DE'
     *     })
     *   }
     * });
     * ```
     */
    translation?: TranslationModule;
}

/**
 * SAP AI Language Model V2 implementation.
 *
 * This module provides a LanguageModelV2 facade that wraps the internal
 * LanguageModelV3 implementation and transforms the output to V2 format.
 *
 * This approach allows us to:
 * - Reuse all SAP AI Core business logic from the V3 implementation
 * - Present a V2 API to users (compatible with AI SDK 5.x)
 * - Keep the upstream V3 code unchanged for easy git merges
 * @module sap-ai-language-model-v2
 */

/**
 * Internal configuration for the SAP AI Language Model.
 * @internal
 */
interface SAPAIConfig {
    deploymentConfig: DeploymentIdConfig | ResourceGroupConfig;
    destination?: HttpDestinationOrFetchOptions;
    provider: string;
}
/**
 * SAP AI Language Model V2 implementation.
 *
 * This class implements the AI SDK's `LanguageModelV2` interface (for AI SDK 5.x),
 * providing a bridge between AI SDK 5.x and SAP AI Core's Orchestration API
 * using the official SAP AI SDK (@sap-ai-sdk/orchestration).
 *
 * **Architecture:**
 * This is a thin facade that delegates to the internal V3 implementation
 * (SAPAILanguageModel) and transforms the output to V2 format.
 *
 * **Features:**
 * - Text generation (streaming and non-streaming)
 * - Tool calling (function calling)
 * - Multi-modal input (text + images)
 * - Data masking (SAP DPI)
 * - Content filtering
 *
 * **Model Support:**
 * - Azure OpenAI models (gpt-4o, gpt-4o-mini, o1, o3, etc.)
 * - Google Vertex AI models (gemini-2.0-flash, gemini-2.5-pro, etc.)
 * - AWS Bedrock models (anthropic--claude-*, amazon--nova-*, etc.)
 * - AI Core open source models (mistralai--, cohere--, etc.)
 * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/language-model-v2 Vercel AI SDK LanguageModelV2}
 * @see {@link https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/orchestration SAP AI Core Orchestration}
 * @example
 * ```typescript
 * // Create via provider
 * const provider = createSAPAIProvider();
 * const model = provider('gpt-4o');
 *
 * // Use with AI SDK 5.x
 * const result = await generateText({
 *   model,
 *   prompt: 'Hello, world!'
 * });
 * ```
 */
declare class SAPAILanguageModelV2 implements LanguageModelV2 {
    readonly modelId: SAPAIModelId;
    readonly specificationVersion: "v2";
    /**
     * Returns the provider identifier.
     * @returns The provider name
     */
    get provider(): string;
    /**
     * Returns supported URL patterns for different content types.
     * @returns Record of content types to regex patterns
     */
    get supportedUrls(): Record<string, RegExp[]>;
    /** Internal V3 model instance that handles all SAP AI Core logic */
    private readonly v3Model;
    /**
     * Creates a new SAP AI Language Model V2 instance.
     * @internal
     * @param modelId - The model identifier
     * @param settings - Model-specific configuration settings
     * @param config - Internal configuration (deployment config, destination, etc.)
     * @throws {z.ZodError} If modelParams contains invalid values
     */
    constructor(modelId: SAPAIModelId, settings: SAPAISettings, config: SAPAIConfig);
    /**
     * Generates a single completion (non-streaming).
     *
     * This method implements the `LanguageModelV2.doGenerate` interface,
     * delegating to the internal V3 implementation and transforming the result
     * to V2 format.
     *
     * **Features:**
     * - Tool calling support
     * - Multi-modal input (text + images)
     * - Data masking (if configured)
     * - Content filtering (if configured)
     * - Abort signal support (via Promise.race)
     *
     * **Note on Abort Signal:**
     * The abort signal implementation uses Promise.race to reject the promise when
     * aborted. However, this does not cancel the underlying HTTP request to SAP AI Core -
     * the request continues executing on the server. This is a current limitation of the
     * SAP AI SDK's API. See https://github.com/SAP/ai-sdk-js/issues/1429
     * @param options - Generation options including prompt, tools, and settings
     * @returns Promise resolving to the generation result with content, usage, and metadata
     * @see {@link convertFinishReasonV3ToV2} for finish reason transformation
     * @see {@link convertUsageV3ToV2} for usage statistics transformation
     * @see {@link convertWarningsV3ToV2} for warnings transformation
     * @since 1.0.0
     * @example
     * ```typescript
     * const result = await model.doGenerate({
     *   prompt: [
     *     { role: 'user', content: [{ type: 'text', text: 'Hello!' }] }
     *   ]
     * });
     *
     * console.log(result.content); // Generated content
     * console.log(result.usage); // Token usage (V2 format)
     * console.log(result.finishReason); // 'stop' (V2 string format)
     * ```
     */
    doGenerate(options: LanguageModelV2CallOptions): Promise<{
        content: LanguageModelV2Content[];
        finishReason: LanguageModelV2FinishReason;
        providerMetadata?: SharedV2ProviderMetadata;
        request?: {
            body?: unknown;
        };
        response?: LanguageModelV2ResponseMetadata & {
            body?: unknown;
            headers?: SharedV2Headers;
        };
        usage: LanguageModelV2Usage;
        warnings: LanguageModelV2CallWarning[];
    }>;
    /**
     * Generates a streaming completion.
     *
     * Implements `LanguageModelV2.doStream`, delegating to the internal V3 implementation
     * and transforming the stream to V2 format.
     *
     * **Stream Events:**
     * - `stream-start` - Initialization with warnings
     * - `response-metadata` - Model, timestamp, response ID
     * - `text-start` - Text block begins (with unique ID)
     * - `text-delta` - Incremental text chunks
     * - `text-end` - Text block completes
     * - `tool-input-start/delta/end` - Tool input lifecycle
     * - `tool-call` - Complete tool call
     * - `finish` - Stream completes with usage (V2 format) and finish reason (V2 string)
     * - `error` - Error occurred
     *
     * **Response ID:**
     * Client-generated UUID in `response-metadata.id` and `providerMetadata['sap-ai'].responseId`.
     * TODO: Use backend's `x-request-id` when `OrchestrationStreamResponse` exposes `rawResponse`.
     * @see https://github.com/SAP/ai-sdk-js/issues/1429 - Enhancement request for rawResponse access
     *
     * **Abort Signal:**
     * Same limitation as `doGenerate` - see its documentation for details.
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/streaming Vercel AI SDK Streaming}
     * @see {@link convertStreamV3ToV2} for stream transformation logic
     * @see {@link convertStreamPartV3ToV2} for individual stream part conversion
     * @param options - Streaming options including prompt, tools, and settings
     * @returns Promise resolving to stream and request metadata
     * @example
     * ```typescript
     * const { stream } = await model.doStream({
     *   prompt: [
     *     { role: 'user', content: [{ type: 'text', text: 'Write a story' }] }
     *   ]
     * });
     *
     * for await (const part of stream) {
     *   if (part.type === 'text-delta') {
     *     process.stdout.write(part.delta);
     *   }
     * }
     * ```
     * @since 1.0.0
     */
    doStream(options: LanguageModelV2CallOptions): Promise<{
        request?: {
            body?: unknown;
        };
        response?: {
            headers?: SharedV2Headers;
        };
        stream: ReadableStream<LanguageModelV2StreamPart>;
    }>;
}

/**
 * SAP AI Provider V2 implementation.
 *
 * This module provides a ProviderV2-compatible factory that creates
 * LanguageModelV2 instances for use with AI SDK 5.x.
 * @module sap-ai-provider-v2
 */

/**
 * Deployment configuration type used by SAP AI SDK.
 */
type DeploymentConfig = DeploymentIdConfig | ResourceGroupConfig;
/**
 * Configuration settings for the SAP AI Provider V2.
 *
 * This interface defines all available options for configuring the SAP AI Core connection
 * using the official SAP AI SDK. See {@link createSAPAIProvider} for authentication details.
 * @example
 * ```typescript
 * // Using default configuration (auto-detects service binding or env var)
 * const provider = createSAPAIProvider();
 *
 * // With specific resource group
 * const provider = createSAPAIProvider({
 *   resourceGroup: 'production'
 * });
 *
 * // With custom destination
 * const provider = createSAPAIProvider({
 *   destination: {
 *     url: 'https://my-ai-core-instance.cfapps.eu10.hana.ondemand.com'
 *   }
 * });
 * ```
 */
interface SAPAIProviderSettings {
    /**
     * Default model settings applied to every model instance created by this provider.
     * Per-call settings provided to the model will override these.
     */
    defaultSettings?: SAPAISettings;
    /**
     * SAP AI Core deployment ID.
     *
     * A specific deployment ID to use for orchestration requests.
     * If not provided, the SDK will resolve the deployment automatically.
     * @example
     * ```typescript
     * deploymentId: 'd65d81e7c077e583'
     * ```
     */
    deploymentId?: string;
    /**
     * Custom destination configuration for SAP AI Core.
     *
     * Override the default destination detection. Useful for:
     * - Custom proxy configurations
     * - Non-standard SAP AI Core setups
     * - Testing environments
     * @example
     * ```typescript
     * destination: {
     *   url: 'https://api.ai.prod.eu-central-1.aws.ml.hana.ondemand.com'
     * }
     * ```
     */
    destination?: HttpDestinationOrFetchOptions;
    /**
     * SAP AI Core resource group.
     *
     * Logical grouping of AI resources in SAP AI Core.
     * Used for resource isolation and access control.
     * Different resource groups can have different permissions and quotas.
     * @default 'default'
     * @example
     * ```typescript
     * resourceGroup: 'default' // Default resource group
     * resourceGroup: 'production' // Production environment
     * resourceGroup: 'development' // Development environment
     * ```
     */
    resourceGroup?: string;
    /**
     * Whether to emit warnings for ambiguous configurations.
     *
     * When enabled (default), the provider will warn when mutually-exclusive
     * settings are provided (e.g. both `deploymentId` and `resourceGroup`).
     */
    warnOnAmbiguousConfig?: boolean;
}
/**
 * SAP AI Provider V2 interface.
 *
 * This is the main interface for creating and configuring SAP AI Core models
 * compatible with AI SDK 5.x (LanguageModelV2 and EmbeddingModelV2).
 * @example
 * ```typescript
 * const provider = createSAPAIProvider({
 *   resourceGroup: 'default'
 * });
 *
 * // Create a language model instance
 * const model = provider('gpt-4o', {
 *   modelParams: {
 *     temperature: 0.7,
 *     maxTokens: 1000
 *   }
 * });
 *
 * // Create an embedding model instance
 * const embeddingModel = provider.textEmbedding('text-embedding-ada-002');
 *
 * // Or use the explicit languageModel or chat method
 * const chatModel = provider.languageModel('gpt-4o');
 * ```
 */
interface SAPAIProviderV2 {
    /**
     * Create a language model instance (V2).
     * @param modelId - The SAP AI Core model identifier (e.g., 'gpt-4o', 'anthropic--claude-3.5-sonnet')
     * @param settings - Optional model configuration settings
     * @returns Configured SAP AI chat language model instance (V2)
     */
    (modelId: SAPAIModelId, settings?: SAPAISettings): SAPAILanguageModelV2;
    /**
     * Explicit method for creating chat models (V2).
     *
     * This method is equivalent to calling languageModel() or the provider function directly,
     * but provides a more explicit API for chat-based interactions.
     * @param modelId - The SAP AI Core model identifier
     * @param settings - Optional model configuration settings
     * @returns Configured SAP AI chat language model instance (V2)
     */
    chat(modelId: SAPAIModelId, settings?: SAPAISettings): SAPAILanguageModelV2;
    /**
     * Create an embedding model instance (V2).
     *
     * This method creates text embedding models compatible with AI SDK 5.x.
     * @param modelId - The embedding model identifier (e.g., 'text-embedding-ada-002')
     * @param settings - Optional embedding model settings
     * @returns Configured SAP AI embedding model instance (V2)
     */
    embedding(modelId: SAPAIEmbeddingModelId, settings?: SAPAIEmbeddingSettings): SAPAIEmbeddingModelV2;
    /**
     * Create a language model instance (V2).
     *
     * This is the standard method for creating language models.
     * @param modelId - The SAP AI Core model identifier
     * @param settings - Optional model configuration settings
     * @returns Configured SAP AI language model instance (V2)
     */
    languageModel(modelId: SAPAIModelId, settings?: SAPAISettings): SAPAILanguageModelV2;
    /**
     * Create a text embedding model instance (V2).
     *
     * Alias for the embedding() method. Provides compatibility with common provider patterns.
     * @param modelId - The embedding model identifier
     * @param settings - Optional embedding model settings
     * @returns Configured SAP AI embedding model instance (V2)
     */
    textEmbeddingModel(modelId: SAPAIEmbeddingModelId, settings?: SAPAIEmbeddingSettings): SAPAIEmbeddingModelV2;
}
/**
 * Creates a SAP AI Core provider instance for use with AI SDK 5.x (LanguageModelV2).
 *
 * This is the main entry point for integrating SAP AI Core with AI SDK 5.x.
 * It uses the official SAP AI SDK (@sap-ai-sdk/orchestration) under the hood,
 * which handles authentication and API communication automatically.
 *
 * **Authentication:**
 * The SAP AI SDK automatically handles authentication:
 * 1. On SAP BTP: Uses service binding (VCAP_SERVICES)
 * 2. Locally: Uses AICORE_SERVICE_KEY environment variable
 *
 * **Key Features:**
 * - Automatic authentication via SAP AI SDK
 * - Support for all SAP AI Core orchestration models
 * - Streaming and non-streaming responses
 * - Tool calling support
 * - Data masking (DPI)
 * - Content filtering
 * @param options - Configuration options for the provider
 * @returns A configured SAP AI provider (V2)
 * @example
 * **Basic Usage (AI SDK 5.x)**
 * ```typescript
 * import { createSAPAIProvider } from '@mymediset/sap-ai-provider';
 * import { generateText } from 'ai'; // SDK 5.x
 *
 * const provider = createSAPAIProvider();
 *
 * const result = await generateText({
 *   model: provider('gpt-4o'),
 *   prompt: 'Hello, world!'
 * });
 * ```
 * @example
 * **With Resource Group**
 * ```typescript
 * const provider = createSAPAIProvider({
 *   resourceGroup: 'production'
 * });
 *
 * const model = provider('anthropic--claude-3.5-sonnet', {
 *   modelParams: {
 *     temperature: 0.3,
 *     maxTokens: 2000
 *   }
 * });
 * ```
 * @example
 * **With Default Settings**
 * ```typescript
 * const provider = createSAPAIProvider({
 *   defaultSettings: {
 *     modelParams: {
 *       temperature: 0.7
 *     }
 *   }
 * });
 * ```
 */
declare function createSAPAIProvider(options?: SAPAIProviderSettings): SAPAIProviderV2;
/**
 * Default SAP AI provider instance (V2).
 *
 * Uses default configuration with automatic authentication.
 * Compatible with AI SDK 5.x (LanguageModelV2).
 * See {@link createSAPAIProvider} for authentication details.
 * @example
 * ```typescript
 * import { sapai } from '@mymediset/sap-ai-provider';
 * import { generateText } from 'ai'; // SDK 5.x
 *
 * const result = await generateText({
 *   model: sapai('gpt-4o'),
 *   prompt: 'Hello!'
 * });
 * ```
 */
declare const sapai: SAPAIProviderV2;

export { type DeploymentConfig, SAPAIEmbeddingModelV2 as SAPAIEmbeddingModel, type SAPAIEmbeddingModelId, type SAPAIEmbeddingProviderOptions, type SAPAIEmbeddingSettings, type SAPAILanguageModelProviderOptions, type SAPAIModelId, type SAPAIProviderV2 as SAPAIProvider, type SAPAIProviderSettings, type SAPAISettings, SAP_AI_PROVIDER_NAME, createSAPAIProvider, sapAIEmbeddingProviderOptions, sapAILanguageModelProviderOptions, sapai };