yandex-ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/chat.d.ts ADDED
@@ -0,0 +1,28 @@
1
import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult } from "@ai-sdk/provider";
/**
 * Known Yandex chat model ids. The trailing `(string & {})` keeps arbitrary
 * model ids assignable while preserving editor autocomplete for the listed ones.
 */
type YandexModelId = "aliceai-llm" | "yandexgpt/rc" | "yandexgpt-lite" | "yandexgpt/latest" | "qwen3-235b-a22b-fp8/latest" | "gpt-oss-120b/latest" | "gpt-oss-20b/latest" | "gemma-3-27b-it/latest" | (string & {});
/** Reasoning modes accepted by the Yandex completion endpoint. */
type YandexReasoningMode = "REASONING_MODE_UNSPECIFIED" | "DISABLED" | "ENABLED_HIDDEN";
/** Provider-specific options forwarded verbatim in the completion request body. */
type YandexChatProviderOptions = {
    reasoningOptions?: YandexReasoningMode;
    parallelToolCalls?: boolean;
};
/**
 * AI SDK LanguageModelV3 adapter for the Yandex Foundation Models
 * text-generation REST API. Only non-streaming generation is implemented.
 */
declare class YandexChatModel implements LanguageModelV3 {
    private folderId;
    private secretKey;
    readonly specificationVersion = "v3";
    readonly provider = "yandex-cloud";
    readonly modelId;
    readonly supportedUrls: {};
    constructor(modelId: YandexModelId, { folderId, secretKey }: {
        folderId: string;
        secretKey: string;
    });
    private convertTools;
    private convertRole;
    private convertFromAiSdkToYandex;
    private convertFromYandexToAiSdk;
    private convertToolChoice;
    private convertFinishReason;
    doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
    /** Streaming is not implemented; always rejects. */
    doStream(): Promise<never>;
}
export { YandexChatProviderOptions, YandexChatModel };
package/dist/chat.js ADDED
@@ -0,0 +1,280 @@
1
// src/chat.ts
/**
 * AI SDK LanguageModelV3 adapter for the Yandex Foundation Models
 * text-generation REST API. Supports doGenerate only; doStream throws.
 */
class YandexChatModel {
  folderId;
  secretKey;
  specificationVersion = "v3";
  provider = "yandex-cloud";
  modelId;
  supportedUrls = {};

  /**
   * @param modelId Yandex model name (e.g. "yandexgpt-lite").
   * @param auth folderId: Yandex Cloud folder id used to build the model URI;
   *             secretKey: API key sent as `Api-Key` authorization.
   */
  constructor(modelId, { folderId, secretKey }) {
    this.modelId = modelId;
    this.folderId = folderId;
    this.secretKey = secretKey;
  }

  /**
   * Converts AI SDK tool definitions into Yandex `{ function: ... }` tools.
   * Non-function tools are skipped and reported via `warnings`.
   */
  convertTools(tools) {
    if (!tools) {
      return { tools: [], warnings: [] };
    }
    const warnings = [];
    const convertedTools = [];
    for (const tool of tools) {
      if (tool.type !== "function") {
        warnings.push(`Tool ${tool.name} is not a function tool`);
        continue;
      }
      convertedTools.push({
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
        strict: true
      });
    }
    return {
      tools: convertedTools.map((tool) => ({ function: tool })),
      warnings
    };
  }

  // Yandex only knows "assistant" | "user" | "system"; any other AI SDK
  // role (e.g. "tool") falls back to "system".
  convertRole(role) {
    switch (role) {
      case "assistant":
        return "assistant";
      case "user":
        return "user";
      default:
        return "system";
    }
  }

  /**
   * Converts an AI SDK prompt into Yandex completion messages.
   * String content becomes one text message; part lists are expanded one
   * message per part. Unsupported part types throw.
   * (Fix: removed a leftover debug `console.log({ part })` that leaked every
   * prompt part to stdout on each request.)
   */
  convertFromAiSdkToYandex(messages) {
    const warnings = [];
    const convertedMessages = [];
    for (const message of messages) {
      const role = this.convertRole(message.role);
      if (typeof message.content === "string") {
        convertedMessages.push({ role, text: message.content });
        continue;
      }
      for (const part of message.content) {
        if (typeof part === "string") {
          convertedMessages.push({ role, text: part });
          continue;
        }
        switch (part.type) {
          case "text":
            convertedMessages.push({ role, text: part.text });
            continue;
          case "tool-call":
            convertedMessages.push({
              role,
              toolCallList: {
                toolCalls: [
                  {
                    functionCall: {
                      name: part.toolName,
                      arguments: part.input
                    }
                  }
                ]
              }
            });
            continue;
          case "tool-result":
            // Tool results are sent on the "user" side of the dialog,
            // regardless of the AI SDK role of the enclosing message.
            convertedMessages.push({
              role: "user",
              toolResultList: {
                toolResults: [
                  {
                    functionResult: {
                      name: part.toolName,
                      content: JSON.stringify(part.output)
                    }
                  }
                ]
              }
            });
            continue;
          case "file":
            throw new Error("Files are not implemented yet");
          case "reasoning":
            throw new Error("Reasoning is not implemented yet");
          case "tool-approval-response":
            throw new Error("Tool approval is not implemented yet");
        }
      }
    }
    return { messages: convertedMessages, warnings };
  }

  /**
   * Converts Yandex response alternatives into AI SDK content parts plus a
   * unified finish reason (the status of the last alternative wins).
   * Yandex does not return tool-call ids, so fresh UUIDs are generated.
   */
  convertFromYandexToAiSdk(alternatives) {
    const content = [];
    let status = { raw: undefined, unified: "stop" };
    for (const part of alternatives) {
      status = this.convertFinishReason(part.status);
      if (part.message.text) {
        content.push({ type: "text", text: part.message.text });
      } else if (part.message.toolCallList) {
        for (const toolCall of part.message.toolCallList.toolCalls) {
          content.push({
            toolCallId: crypto.randomUUID(),
            type: "tool-call",
            toolName: toolCall.functionCall.name,
            input: JSON.stringify(toolCall.functionCall.arguments)
          });
        }
      } else if (part.message.toolResultList) {
        for (const toolResult of part.message.toolResultList.toolResults) {
          content.push({
            toolCallId: crypto.randomUUID(),
            type: "tool-result",
            toolName: toolResult.functionResult.name,
            result: JSON.parse(toolResult.functionResult.content)
          });
        }
      }
    }
    return { content, finishReason: status };
  }

  // Maps an AI SDK toolChoice onto Yandex's toolChoice object. A missing
  // choice is sent as the explicit "unspecified" mode.
  convertToolChoice(toolChoice) {
    if (!toolChoice) {
      return { mode: "TOOL_CHOICE_MODE_UNSPECIFIED" };
    }
    if (toolChoice.type === "tool") {
      return { functionName: toolChoice.toolName };
    }
    switch (toolChoice.type) {
      case "auto":
        return { mode: "AUTO" };
      case "none":
        return { mode: "NONE" };
      case "required":
        return { mode: "REQUIRED" };
    }
  }

  // Maps a Yandex alternative status to the AI SDK unified finish reason,
  // keeping the raw status alongside.
  convertFinishReason(reason) {
    switch (reason) {
      case "ALTERNATIVE_STATUS_UNSPECIFIED":
        return { raw: reason, unified: "stop" };
      case "ALTERNATIVE_STATUS_PARTIAL":
        return { raw: reason, unified: "other" };
      case "ALTERNATIVE_STATUS_TRUNCATED_FINAL":
        return { raw: reason, unified: "length" };
      case "ALTERNATIVE_STATUS_FINAL":
        return { raw: reason, unified: "stop" };
      case "ALTERNATIVE_STATUS_CONTENT_FILTER":
        return { raw: reason, unified: "other" };
      case "ALTERNATIVE_STATUS_TOOL_CALLS":
        return { raw: reason, unified: "tool-calls" };
    }
  }

  /**
   * Sends one non-streaming completion request and maps the response into
   * the AI SDK generate result (content, finishReason, usage, warnings).
   * @throws Error when the HTTP response is not ok (message includes the
   *         status and response body).
   */
  async doGenerate(options) {
    const { messages, warnings: messageWarnings } = this.convertFromAiSdkToYandex(options.prompt);
    const { tools, warnings: toolWarnings } = this.convertTools(options.tools);
    const po = options.providerOptions;
    const body = {
      modelUri: `gpt://${this.folderId}/${this.modelId}`,
      completionOptions: {
        stream: false,
        temperature: options.temperature,
        maxTokens: options.maxOutputTokens,
        reasoningOptions: po?.reasoningOptions
      },
      messages,
      tools,
      // NOTE(review): passes the raw JSON schema straight through —
      // confirm the endpoint expects it in this exact shape.
      jsonSchema: options.responseFormat?.type === "json" ? options.responseFormat.schema : undefined,
      toolChoice: this.convertToolChoice(options.toolChoice),
      parallelToolCalls: po?.parallelToolCalls
    };
    const response = await fetch("https://llm.api.cloud.yandex.net/foundationModels/v1/completion", {
      method: "POST",
      headers: {
        Authorization: `Api-Key ${this.secretKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body)
    });
    if (!response.ok) {
      throw new Error(`Yandex API error: ${response.status} ${response.statusText}: ${await response.text()}`);
    }
    const data = await response.json();
    const { finishReason, content } = this.convertFromYandexToAiSdk(data.result.alternatives);
    return {
      content,
      finishReason,
      usage: {
        // Yandex reports token counts as strings; the API provides no cache
        // accounting, so cacheRead/cacheWrite are fixed at 0.
        inputTokens: {
          total: Number.parseInt(data.result.usage.inputTextTokens, 10),
          noCache: Number.parseInt(data.result.usage.inputTextTokens, 10),
          cacheRead: 0,
          cacheWrite: 0
        },
        outputTokens: {
          total: Number.parseInt(data.result.usage.completionTokens, 10),
          text: Number.parseInt(data.result.usage.completionTokens, 10),
          reasoning: 0
        }
      },
      warnings: [...messageWarnings, ...toolWarnings].map((s) => ({
        type: "compatibility",
        feature: s
      }))
    };
  }

  // Streaming is not implemented yet; always rejects.
  async doStream() {
    throw new Error("Стриминг (doStream) пока не реализован.");
  }
}
export {
  YandexChatModel
};
package/dist/speech.d.ts ADDED
@@ -0,0 +1,62 @@
1
import { JSONObject, SharedV2Headers, SharedV3Warning, SpeechModelV3, SpeechModelV3CallOptions } from "@ai-sdk/provider";
/** All voices supported by Yandex SpeechKit TTS. */
declare const yandexSpeechVoices: readonly ["alena", "filipp", "ermil", "jane", "omazh", "zahar", "dasha", "julia", "lera", "masha", "marina", "alexander", "kirill", "anton", "madi_ru", "saule_ru", "zamira_ru", "zhanar_ru", "yulduz_ru"];
type YandexSpeechVoice = (typeof yandexSpeechVoices)[number];
/** Emotions available per voice; used to constrain `emotion` by chosen voice. */
declare const voiceEmotions: {
    readonly alena: readonly ["neutral", "good"];
    readonly filipp: readonly ["neutral"];
    readonly ermil: readonly ["neutral", "good"];
    readonly jane: readonly ["neutral", "good", "evil"];
    readonly omazh: readonly ["neutral", "evil"];
    readonly zahar: readonly ["neutral", "good"];
    readonly dasha: readonly ["neutral", "good", "friendly"];
    readonly julia: readonly ["neutral", "strict"];
    readonly lera: readonly ["neutral", "friendly"];
    readonly masha: readonly ["good", "strict", "friendly"];
    readonly marina: readonly ["neutral", "whisper", "friendly"];
    readonly alexander: readonly ["neutral", "good"];
    readonly kirill: readonly ["neutral", "strict", "good"];
    readonly anton: readonly ["neutral", "good"];
    readonly madi_ru: readonly ["neutral"];
    readonly saule_ru: readonly ["neutral", "strict", "whisper"];
    readonly zamira_ru: readonly ["neutral", "strict", "friendly"];
    readonly zhanar_ru: readonly ["neutral", "strict", "friendly"];
    readonly yulduz_ru: readonly ["neutral", "strict", "friendly", "whisper"];
};
/** Emotions valid for a specific voice `T`. */
type YandexSpeechEmotionFor<T extends YandexSpeechVoice> = (typeof voiceEmotions)[T][number];
/** Provider-specific options for a synthesis request, parameterized by voice. */
type YandexSpeechProviderOptions<T extends YandexSpeechVoice> = {
    emotion?: YandexSpeechEmotionFor<T>;
    sampleRateHertz?: 48000 | 16000 | 8000;
};
/** Convenience option bundle tying voice, speed, format and provider options together. */
type YandexSpeechOptions<T extends YandexSpeechVoice> = {
    voice: T;
    speed?: number;
    providerOptions?: YandexSpeechProviderOptions<T>;
    outputFormat?: "mp3" | "oggopus" | "lpcm";
};
/**
 * AI SDK SpeechModelV3 adapter for the Yandex SpeechKit v1
 * text-to-speech (tts:synthesize) REST API.
 */
declare class YandexSpeechModel implements SpeechModelV3 {
    private folderId;
    private secretKey;
    readonly specificationVersion = "v3";
    readonly provider = "yandex-cloud";
    readonly modelId = "tts:synthesize";
    constructor({ folderId, secretKey }: {
        folderId: string;
        secretKey: string;
    });
    private sanitizeInput;
    doGenerate(options: SpeechModelV3CallOptions): Promise<{
        audio: string | Uint8Array;
        warnings: Array<SharedV3Warning>;
        request?: {
            body?: unknown;
        };
        response: {
            timestamp: Date;
            modelId: string;
            // NOTE(review): this file uses SharedV2Headers while transcript.d.ts
            // uses SharedV3Headers — confirm which the provider spec expects.
            headers?: SharedV2Headers;
            body?: unknown;
        };
        providerMetadata?: Record<string, JSONObject>;
    }>;
}
export { YandexSpeechVoice, YandexSpeechProviderOptions, YandexSpeechOptions, YandexSpeechModel, YandexSpeechEmotionFor };
package/dist/speech.js ADDED
@@ -0,0 +1,65 @@
1
// src/speech.ts
/**
 * AI SDK SpeechModelV3 adapter for the Yandex SpeechKit v1
 * text-to-speech (tts:synthesize) REST API.
 */
class YandexSpeechModel {
  folderId;
  secretKey;
  specificationVersion = "v3";
  provider = "yandex-cloud";
  modelId = "tts:synthesize";

  /**
   * @param auth folderId: Yandex Cloud folder id billed for synthesis;
   *             secretKey: API key sent as `Api-Key` authorization.
   */
  constructor({ folderId, secretKey }) {
    this.folderId = folderId;
    this.secretKey = secretKey;
  }

  // Replaces every character that is not a Latin/Cyrillic letter or digit
  // with a space. NOTE(review): this also strips all punctuation, which
  // affects TTS prosody — confirm that is intended.
  sanitizeInput(input) {
    return input.replace(/[^a-zA-Z0-9а-яА-ЯёЁ]/g, " ");
  }

  /**
   * Synthesizes speech for `options.text` and returns the raw audio bytes.
   * (Fixes: removed a leftover debug `console.log` of the request text;
   * guarded against `options.providerOptions` being absent, which previously
   * threw on `po.emotion`.)
   * @throws Error when the HTTP response is not ok.
   */
  async doGenerate(options) {
    // providerOptions is optional — don't crash when it is missing.
    const po = options.providerOptions ?? {};
    const requestBody = {
      text: this.sanitizeInput(options.text),
      voice: options.voice,
      lang: options.language,
      emotion: po.emotion,
      speed: options.speed,
      format: options.outputFormat,
      sampleRateHertz: po.sampleRateHertz,
      folderId: this.folderId
    };
    // The endpoint takes application/x-www-form-urlencoded; unset fields
    // are omitted instead of being sent as "undefined"/"null".
    const formData = new URLSearchParams();
    for (const [key, value] of Object.entries(requestBody)) {
      if (value !== undefined && value !== null) {
        formData.append(key, String(value));
      }
    }
    const response = await fetch("https://tts.api.cloud.yandex.net/speech/v1/tts:synthesize", {
      method: "POST",
      headers: {
        Authorization: `Api-Key ${this.secretKey}`,
        "Content-Type": "application/x-www-form-urlencoded"
      },
      body: formData.toString()
    });
    if (!response.ok) {
      throw new Error(`Yandex TTS API error: ${response.status} ${response.statusText}`);
    }
    const arrayBuffer = await response.arrayBuffer();
    const audio = new Uint8Array(arrayBuffer);
    return {
      audio,
      warnings: [],
      request: { body: requestBody },
      response: {
        timestamp: new Date(),
        modelId: this.modelId,
        headers: Object.fromEntries(response.headers.entries()),
        body: audio
      }
    };
  }
}
export {
  YandexSpeechModel
};
package/dist/transcript.d.ts ADDED
@@ -0,0 +1,40 @@
1
import { JSONObject, SharedV3Headers, SharedV3Warning, TranscriptionModelV3, TranscriptionModelV3CallOptions } from "@ai-sdk/provider";
/** Provider-specific options forwarded as query parameters to stt:recognize. */
type YandexTranscriptProviderOptions = {
    profanityFilter?: boolean;
    rawResults?: boolean;
    format?: "lpcm" | "oggopus";
    sampleRateHertz?: 48000 | 16000 | 8000;
};
/**
 * AI SDK TranscriptionModelV3 adapter for the Yandex SpeechKit v1
 * short-audio recognition (stt:recognize) REST API.
 */
declare class YandexTranscriptModel implements TranscriptionModelV3 {
    private folderId;
    private secretKey;
    readonly specificationVersion = "v3";
    readonly provider = "yandex-cloud";
    // NOTE(review): "sst:recognize" looks like a typo for "stt:recognize"
    // (the implementation calls the "stt" endpoint) — confirm before changing,
    // as this literal is part of the public type.
    readonly modelId = "sst:recognize";
    constructor({ folderId, secretKey }: {
        folderId: string;
        secretKey: string;
    });
    doGenerate(options: TranscriptionModelV3CallOptions): Promise<{
        text: string;
        segments: Array<{
            text: string;
            startSecond: number;
            endSecond: number;
        }>;
        language: string | undefined;
        durationInSeconds: number | undefined;
        warnings: Array<SharedV3Warning>;
        request?: {
            body?: string;
        };
        response: {
            timestamp: Date;
            modelId: string;
            headers?: SharedV3Headers;
            body?: unknown;
        };
        providerMetadata?: Record<string, JSONObject>;
    }>;
}
export { YandexTranscriptProviderOptions, YandexTranscriptModel };
package/dist/transcript.js ADDED
@@ -0,0 +1,52 @@
1
// src/transcript.ts
/**
 * AI SDK TranscriptionModelV3 adapter for the Yandex SpeechKit v1
 * short-audio recognition (stt:recognize) REST API.
 */
class YandexTranscriptModel {
  folderId;
  secretKey;
  specificationVersion = "v3";
  provider = "yandex-cloud";
  // NOTE(review): "sst:recognize" looks like a typo for "stt:recognize"
  // (the endpoint URL below uses "stt"), but it is part of the public
  // surface declared in the .d.ts, so it is kept as-is.
  modelId = "sst:recognize";

  /**
   * @param auth folderId: Yandex Cloud folder id sent as a query parameter;
   *             secretKey: API key sent as `Api-Key` authorization.
   */
  constructor({ folderId, secretKey }) {
    this.folderId = folderId;
    this.secretKey = secretKey;
  }

  /**
   * Sends the raw audio bytes for synchronous recognition and returns the
   * recognized text. Segments, language and duration are not provided by
   * this endpoint and are returned empty/undefined.
   * (Fix: provider options are now appended individually with unset values
   * skipped — spreading them into the URLSearchParams constructor serialized
   * `undefined` as the literal string "undefined".)
   * @throws Error when the HTTP response is not ok.
   */
  async doGenerate(options) {
    const queryParams = new URLSearchParams({ folderId: this.folderId });
    for (const [key, value] of Object.entries(options.providerOptions ?? {})) {
      if (value !== undefined && value !== null) {
        queryParams.append(key, String(value));
      }
    }
    const response = await fetch(`https://stt.api.cloud.yandex.net/speech/v1/stt:recognize?${queryParams.toString()}`, {
      method: "POST",
      headers: {
        Authorization: `Api-Key ${this.secretKey}`,
        "Content-Type": "application/octet-stream"
      },
      body: options.audio
    });
    if (!response.ok) {
      throw new Error(`Yandex STT API error: ${response.status} ${response.statusText}`);
    }
    const data = await response.json();
    return {
      text: data.result.length > 0 ? data.result : "нет звуков",
      segments: [],
      language: undefined,
      durationInSeconds: undefined,
      warnings: [],
      request: {
        body: queryParams.toString()
      },
      response: {
        timestamp: new Date(),
        modelId: this.modelId,
        headers: Object.fromEntries(response.headers.entries()),
        body: data
      }
    };
  }
}
export {
  YandexTranscriptModel
};
package/package.json ADDED
@@ -0,0 +1,51 @@
1
+ {
2
+ "name": "yandex-ai",
3
+ "version": "0.0.1",
4
+ "description": "Yandex GPT integrations for Vercel's AI SDK. Includes LLM, TTS and STT.",
5
+ "author": "Denis Smirnov <wonepng@gmail.com>",
6
+ "license": "MIT",
7
+ "type": "module",
8
+ "main": "./dist/index.js",
9
+ "types": "./dist/index.d.ts",
10
+ "exports": {
11
+ ".": {
12
+ "types": "./dist/index.d.ts",
13
+ "default": "./dist/index.js"
14
+ },
15
+ "./chat": {
16
+ "types": "./dist/chat/index.d.ts",
17
+ "default": "./dist/chat/index.js"
18
+ },
19
+ "./speech": {
20
+ "types": "./dist/speech/index.d.ts",
21
+ "default": "./dist/speech/index.js"
22
+ },
23
+ "./transcript": {
24
+ "types": "./dist/transcript/index.d.ts",
25
+ "default": "./dist/transcript/index.js"
26
+ }
27
+ },
28
+ "files": [
29
+ "dist",
30
+ "README.md",
31
+ "LICENCE"
32
+ ],
33
+ "scripts": {
34
+ "build": "bunup src/chat.ts src/speech.ts src/transcript.ts",
35
+ "publish": "bun publish"
36
+ },
37
+ "peerDependencies": {
38
+ "typescript": "^5"
39
+ },
40
+ "dependencies": {
41
+ "ai": "^6.0.62"
42
+ },
43
+ "devDependencies": {
44
+ "bunup": "^0.16.21",
45
+ "typescript": "^5.6.2"
46
+ },
47
+ "sideEffects": false,
48
+ "publishConfig": {
49
+ "access": "public"
50
+ }
51
+ }