@untiny/qwen-ai-provider 0.0.1

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,1274 @@
1
+ // src/qwen-provider.ts
2
+ import { NoSuchModelError } from "@ai-sdk/provider";
3
+ import { loadApiKey, VERSION, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils";
4
+
5
+ // src/chat/qwen-chat-language-model.ts
6
+ import {
7
+ InvalidResponseDataError
8
+ } from "@ai-sdk/provider";
9
+ import {
10
+ combineHeaders,
11
+ createEventSourceResponseHandler,
12
+ createJsonResponseHandler,
13
+ generateId,
14
+ isParsableJson,
15
+ postJsonToApi
16
+ } from "@ai-sdk/provider-utils";
17
+
18
+ // src/qwen-error.ts
19
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
20
+ import z from "zod";
21
+ var qwenErrorDataSchema = z.object({
22
+ object: z.literal("error"),
23
+ message: z.string(),
24
+ type: z.string(),
25
+ param: z.string().nullable(),
26
+ code: z.string().nullable()
27
+ });
28
+ var qwenFailedResponseHandler = createJsonErrorResponseHandler({
29
+ errorSchema: qwenErrorDataSchema,
30
+ errorToMessage: (data) => data.message
31
+ });
32
+
33
+ // src/get-response-metadata.ts
34
+ function getResponseMetadata({
35
+ id,
36
+ model,
37
+ created
38
+ }) {
39
+ return {
40
+ id: id != null ? id : void 0,
41
+ modelId: model != null ? model : void 0,
42
+ timestamp: created != null ? new Date(created * 1e3) : void 0
43
+ };
44
+ }
45
+
46
+ // src/map-qwen-finish-reason.ts
47
+ function mapQwenFinishReason(finishReason) {
48
+ switch (finishReason) {
49
+ case "stop":
50
+ return "stop";
51
+ case "length":
52
+ return "length";
53
+ case "content_filter":
54
+ return "content-filter";
55
+ case "function_call":
56
+ case "tool_calls":
57
+ return "tool-calls";
58
+ default:
59
+ return "other";
60
+ }
61
+ }
62
+
63
+ // src/chat/convert-qwen-chat-usage.ts
64
+ function convertQwenChatUsage(usage) {
65
+ var _a, _b, _c, _d;
66
+ if (!usage) {
67
+ return {
68
+ inputTokens: {
69
+ total: void 0,
70
+ noCache: void 0,
71
+ cacheRead: void 0,
72
+ cacheWrite: void 0
73
+ },
74
+ outputTokens: {
75
+ total: void 0,
76
+ text: void 0,
77
+ reasoning: void 0
78
+ },
79
+ raw: void 0
80
+ };
81
+ }
82
+ const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
83
+ const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
84
+ const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
85
+ return {
86
+ inputTokens: {
87
+ total: promptTokens,
88
+ noCache: promptTokens - cachedTokens,
89
+ cacheRead: cachedTokens,
90
+ cacheWrite: void 0
91
+ },
92
+ outputTokens: {
93
+ total: completionTokens,
94
+ text: void 0,
95
+ reasoning: void 0
96
+ },
97
+ raw: usage
98
+ };
99
+ }
100
+
101
+ // src/chat/convert-to-qwen-chat-messages.ts
102
+ import {
103
+ UnsupportedFunctionalityError
104
+ } from "@ai-sdk/provider";
105
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
106
+ function contentToAssistantMessage(content) {
107
+ var _a;
108
+ const message = {
109
+ role: "assistant",
110
+ content: ""
111
+ };
112
+ for (const part of content) {
113
+ switch (part.type) {
114
+ case "text": {
115
+ message.content += part.text;
116
+ break;
117
+ }
118
+ case "tool-call": {
119
+ (_a = message.tool_calls) != null ? _a : message.tool_calls = [];
120
+ message.tool_calls.push({
121
+ id: part.toolCallId,
122
+ type: "function",
123
+ function: {
124
+ name: part.toolName,
125
+ arguments: JSON.stringify(part.input)
126
+ },
127
+ index: message.tool_calls.length
128
+ });
129
+ break;
130
+ }
131
+ }
132
+ }
133
+ return message;
134
+ }
135
+ function contentToSystemMessage(content) {
136
+ return { role: "system", content };
137
+ }
138
+ function contentToToolMessage(content) {
139
+ var _a;
140
+ const messages = [];
141
+ for (const toolResponse of content) {
142
+ if (toolResponse.type === "tool-approval-response") {
143
+ continue;
144
+ }
145
+ const output = toolResponse.output;
146
+ let contentValue;
147
+ switch (output.type) {
148
+ case "text":
149
+ case "error-text":
150
+ contentValue = output.value;
151
+ break;
152
+ case "execution-denied":
153
+ contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
154
+ break;
155
+ case "content":
156
+ case "json":
157
+ case "error-json":
158
+ contentValue = JSON.stringify(output.value);
159
+ break;
160
+ }
161
+ messages.push({
162
+ role: "tool",
163
+ tool_call_id: toolResponse.toolCallId,
164
+ content: contentValue
165
+ });
166
+ }
167
+ return messages;
168
+ }
169
+ function contentToUserMessage(content) {
170
+ if (content.length === 1 && content[0].type === "text") {
171
+ return {
172
+ role: "user",
173
+ content: content[0].text
174
+ };
175
+ }
176
+ const contents = [];
177
+ for (const part of content) {
178
+ if (part.type === "text") {
179
+ contents.push({ type: "text", text: part.text });
180
+ } else if (part.mediaType.startsWith("image/")) {
181
+ const data = part.data instanceof URL ? part.data.toString() : convertToBase64(part.data);
182
+ const url = data.startsWith("http") || data.startsWith("data:") ? data : `data:${part.mediaType};base64,${data}`;
183
+ contents.push({
184
+ type: "image_url",
185
+ image_url: { url }
186
+ });
187
+ } else if (part.mediaType.startsWith("audio/")) {
188
+ const data = part.data instanceof URL ? part.data.toString() : convertToBase64(part.data);
189
+ const url = data.startsWith("http") || data.startsWith("data:") ? data : `data:${part.mediaType};base64,${data}`;
190
+ switch (part.mediaType) {
191
+ case "audio/wav": {
192
+ contents.push({
193
+ type: "input_audio",
194
+ input_audio: {
195
+ data: url,
196
+ format: "wav"
197
+ }
198
+ });
199
+ break;
200
+ }
201
+ case "audio/mp3":
202
+ case "audio/mpeg": {
203
+ contents.push({
204
+ type: "input_audio",
205
+ input_audio: {
206
+ data: url,
207
+ format: "mp3"
208
+ }
209
+ });
210
+ break;
211
+ }
212
+ default: {
213
+ throw new UnsupportedFunctionalityError({
214
+ functionality: `audio content parts with media type ${part.mediaType}`
215
+ });
216
+ }
217
+ }
218
+ } else {
219
+ throw new UnsupportedFunctionalityError({
220
+ functionality: `file part media type ${part.mediaType}`
221
+ });
222
+ }
223
+ }
224
+ return {
225
+ role: "user",
226
+ content: contents
227
+ };
228
+ }
229
+ function convertToQwenChatMessages(options) {
230
+ const messages = [];
231
+ const warnings = [];
232
+ for (const { role, content } of options.prompt) {
233
+ switch (role) {
234
+ case "system": {
235
+ messages.push(contentToSystemMessage(content));
236
+ break;
237
+ }
238
+ case "user": {
239
+ messages.push(contentToUserMessage(content));
240
+ break;
241
+ }
242
+ case "assistant": {
243
+ messages.push(contentToAssistantMessage(content));
244
+ break;
245
+ }
246
+ case "tool": {
247
+ messages.push(...contentToToolMessage(content));
248
+ break;
249
+ }
250
+ default: {
251
+ const _exhaustiveCheck = role;
252
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
253
+ }
254
+ }
255
+ }
256
+ return { messages, warnings };
257
+ }
258
+
259
+ // src/chat/qwen-chat-api.ts
260
+ import { lazySchema, zodSchema } from "@ai-sdk/provider-utils";
261
+ import z2 from "zod";
262
+ var qwenChatUsageSchema = z2.object({
263
+ prompt_tokens: z2.number().nullish(),
264
+ completion_tokens: z2.number().nullish(),
265
+ total_tokens: z2.number().nullish(),
266
+ prompt_tokens_details: z2.object({
267
+ cached_tokens: z2.number().nullish()
268
+ }).nullish()
269
+ }).nullish();
270
+ var qwenChatResponseSchema = lazySchema(
271
+ () => zodSchema(
272
+ z2.object({
273
+ id: z2.string().nullish(),
274
+ created: z2.number().nullish(),
275
+ model: z2.string().nullish(),
276
+ choices: z2.array(
277
+ z2.object({
278
+ finish_reason: z2.string().nullish(),
279
+ index: z2.number(),
280
+ message: z2.object({
281
+ role: z2.literal("assistant").nullish(),
282
+ content: z2.string().nullish(),
283
+ reasoning_content: z2.string().nullish(),
284
+ tool_calls: z2.array(
285
+ z2.object({
286
+ id: z2.string().nullish(),
287
+ type: z2.literal("function"),
288
+ function: z2.object({
289
+ name: z2.string(),
290
+ arguments: z2.string()
291
+ })
292
+ })
293
+ )
294
+ })
295
+ })
296
+ ),
297
+ usage: qwenChatUsageSchema
298
+ })
299
+ )
300
+ );
301
+ var qwenChatChunkSchema = lazySchema(
302
+ () => zodSchema(
303
+ z2.union([
304
+ z2.object({
305
+ id: z2.string().nullish(),
306
+ created: z2.number().nullish(),
307
+ model: z2.string().nullish(),
308
+ choices: z2.array(
309
+ z2.object({
310
+ delta: z2.object({
311
+ role: z2.enum(["assistant"]).nullish(),
312
+ content: z2.string().nullish(),
313
+ reasoning_content: z2.string().nullish(),
314
+ tool_calls: z2.array(
315
+ z2.object({
316
+ index: z2.number(),
317
+ id: z2.string().nullish(),
318
+ type: z2.literal("function").nullish(),
319
+ function: z2.object({
320
+ name: z2.string().nullish(),
321
+ arguments: z2.string().nullish()
322
+ })
323
+ })
324
+ ).nullish()
325
+ }),
326
+ finish_reason: z2.string().nullish(),
327
+ index: z2.number()
328
+ })
329
+ ),
330
+ usage: qwenChatUsageSchema
331
+ }),
332
+ qwenErrorDataSchema
333
+ ])
334
+ )
335
+ );
336
+
337
+ // src/chat/qwen-chat-prepare-tools.ts
338
+ import { UnsupportedFunctionalityError as UnsupportedFunctionalityError2 } from "@ai-sdk/provider";
339
+ function prepareChatTools({
340
+ tools,
341
+ toolChoice
342
+ }) {
343
+ var _a;
344
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
345
+ const toolWarnings = [];
346
+ if (tools == null) {
347
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
348
+ }
349
+ const chatTools = [];
350
+ for (const tool of tools) {
351
+ if (tool.type === "function") {
352
+ chatTools.push({
353
+ type: "function",
354
+ function: {
355
+ name: tool.name,
356
+ description: (_a = tool.description) != null ? _a : "",
357
+ parameters: tool.inputSchema,
358
+ ...tool.strict != null ? { strict: tool.strict } : {}
359
+ }
360
+ });
361
+ } else {
362
+ toolWarnings.push({
363
+ type: "unsupported",
364
+ feature: `tool type: ${tool.type}`
365
+ });
366
+ }
367
+ }
368
+ if (toolChoice == null) {
369
+ return { tools: chatTools, toolChoice: void 0, toolWarnings };
370
+ }
371
+ const type = toolChoice.type;
372
+ switch (type) {
373
+ case "auto":
374
+ case "none":
375
+ case "required":
376
+ return { tools: chatTools, toolChoice: type, toolWarnings };
377
+ case "tool":
378
+ return {
379
+ tools: chatTools,
380
+ toolChoice: {
381
+ type: "function",
382
+ function: {
383
+ name: toolChoice.toolName
384
+ }
385
+ },
386
+ toolWarnings
387
+ };
388
+ default: {
389
+ const _exhaustiveCheck = type;
390
+ throw new UnsupportedFunctionalityError2({
391
+ functionality: `tool choice type: ${_exhaustiveCheck}`
392
+ });
393
+ }
394
+ }
395
+ }
396
+
397
+ // src/chat/qwen-chat-language-model.ts
398
+ var QwenChatLanguageModel = class {
399
+ constructor(modelId, config) {
400
+ this.specificationVersion = "v3";
401
+ this.supportedUrls = {
402
+ "image/*": [/^https?:\/\/.*$/]
403
+ };
404
+ this.modelId = modelId;
405
+ this.config = config;
406
+ }
407
+ get provider() {
408
+ return this.config.provider;
409
+ }
410
+ async getArgs(options) {
411
+ var _a, _b;
412
+ const { messages, warnings } = convertToQwenChatMessages(options);
413
+ const args = {
414
+ model: this.modelId,
415
+ messages,
416
+ max_tokens: options.maxOutputTokens,
417
+ temperature: options.temperature,
418
+ stop: options.stopSequences,
419
+ top_p: options.topP,
420
+ top_k: options.topK,
421
+ presence_penalty: options.presencePenalty,
422
+ frequency_penalty: options.frequencyPenalty,
423
+ seed: options.seed
424
+ };
425
+ if (((_a = options.responseFormat) == null ? void 0 : _a.type) === "json") {
426
+ if (options.responseFormat.schema != null) {
427
+ args.response_format = {
428
+ type: "json_schema",
429
+ json_schema: {
430
+ schema: options.responseFormat.schema,
431
+ name: (_b = options.responseFormat.name) != null ? _b : "response",
432
+ description: options.responseFormat.description
433
+ }
434
+ };
435
+ } else {
436
+ args.response_format = { type: "json_object" };
437
+ }
438
+ }
439
+ const { tools, toolChoice, toolWarnings } = prepareChatTools({
440
+ tools: options.tools,
441
+ toolChoice: options.toolChoice
442
+ });
443
+ args.tools = tools;
444
+ args.tool_choice = toolChoice;
445
+ warnings.push(...toolWarnings);
446
+ return { warnings, args };
447
+ }
448
+ async doGenerate(options) {
449
+ var _a, _b, _c;
450
+ const { warnings, args: body } = await this.getArgs(options);
451
+ const {
452
+ responseHeaders,
453
+ value: response,
454
+ rawValue: rawResponse
455
+ } = await postJsonToApi({
456
+ url: this.config.url({
457
+ path: "/chat/completions",
458
+ modelId: this.modelId
459
+ }),
460
+ headers: combineHeaders(this.config.headers(), options.headers),
461
+ body,
462
+ failedResponseHandler: qwenFailedResponseHandler,
463
+ successfulResponseHandler: createJsonResponseHandler(qwenChatResponseSchema),
464
+ abortSignal: options.abortSignal,
465
+ fetch: this.config.fetch
466
+ });
467
+ const choice = response.choices[0];
468
+ const content = [];
469
+ const reasoning = choice.message.reasoning_content;
470
+ if (reasoning != null && reasoning.length > 0) {
471
+ content.push({
472
+ type: "reasoning",
473
+ text: reasoning
474
+ });
475
+ }
476
+ const text = choice.message.content;
477
+ if (text != null && text.length > 0) {
478
+ content.push({ type: "text", text });
479
+ }
480
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
481
+ content.push({
482
+ type: "tool-call",
483
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
484
+ toolName: toolCall.function.name,
485
+ input: toolCall.function.arguments
486
+ });
487
+ }
488
+ return {
489
+ content,
490
+ finishReason: {
491
+ unified: mapQwenFinishReason(choice.finish_reason),
492
+ raw: (_c = choice.finish_reason) != null ? _c : void 0
493
+ },
494
+ usage: convertQwenChatUsage(response.usage),
495
+ request: { body },
496
+ response: {
497
+ ...getResponseMetadata(response),
498
+ headers: responseHeaders,
499
+ body: rawResponse
500
+ },
501
+ warnings,
502
+ providerMetadata: {
503
+ qwen: {}
504
+ }
505
+ };
506
+ }
507
+ async doStream(options) {
508
+ const { warnings, args } = await this.getArgs(options);
509
+ const body = {
510
+ ...args,
511
+ stream: true,
512
+ stream_options: {
513
+ include_usage: true
514
+ }
515
+ };
516
+ const { responseHeaders, value: response } = await postJsonToApi({
517
+ url: this.config.url({
518
+ path: "/chat/completions",
519
+ modelId: this.modelId
520
+ }),
521
+ headers: combineHeaders(this.config.headers(), options.headers),
522
+ body,
523
+ failedResponseHandler: qwenFailedResponseHandler,
524
+ successfulResponseHandler: createEventSourceResponseHandler(qwenChatChunkSchema),
525
+ abortSignal: options.abortSignal,
526
+ fetch: this.config.fetch
527
+ });
528
+ const toolCalls = [];
529
+ let finishReason = {
530
+ unified: "other",
531
+ raw: void 0
532
+ };
533
+ let usage;
534
+ let isFirstChunk = true;
535
+ let isActiveReasoning = false;
536
+ let isActiveText = false;
537
+ let textId;
538
+ let reasoningId;
539
+ return {
540
+ stream: response.pipeThrough(
541
+ new TransformStream({
542
+ start(controller) {
543
+ controller.enqueue({ type: "stream-start", warnings });
544
+ },
545
+ transform(chunk, controller) {
546
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
547
+ if (options.includeRawChunks) {
548
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
549
+ }
550
+ if (!chunk.success) {
551
+ finishReason = { unified: "error", raw: void 0 };
552
+ controller.enqueue({ type: "error", error: chunk.error });
553
+ return;
554
+ }
555
+ const value = chunk.value;
556
+ if ("object" in value) {
557
+ finishReason = { unified: "error", raw: void 0 };
558
+ controller.enqueue({
559
+ type: "error",
560
+ error: value.message
561
+ });
562
+ return;
563
+ }
564
+ if (isFirstChunk) {
565
+ isFirstChunk = false;
566
+ controller.enqueue({
567
+ type: "response-metadata",
568
+ ...getResponseMetadata(value)
569
+ });
570
+ }
571
+ if (value.usage != null) {
572
+ usage = value.usage;
573
+ }
574
+ const choice = value.choices[0];
575
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
576
+ finishReason = {
577
+ unified: mapQwenFinishReason(choice.finish_reason),
578
+ raw: choice.finish_reason
579
+ };
580
+ }
581
+ if ((choice == null ? void 0 : choice.delta) == null) {
582
+ return;
583
+ }
584
+ const delta = choice.delta;
585
+ const reasoningContent = delta.reasoning_content;
586
+ if (reasoningContent) {
587
+ if (!isActiveReasoning) {
588
+ reasoningId = generateId();
589
+ controller.enqueue({
590
+ type: "reasoning-start",
591
+ id: reasoningId
592
+ });
593
+ isActiveReasoning = true;
594
+ }
595
+ controller.enqueue({
596
+ type: "reasoning-delta",
597
+ id: reasoningId,
598
+ delta: reasoningContent
599
+ });
600
+ }
601
+ if (delta.content) {
602
+ if (!isActiveText) {
603
+ textId = generateId();
604
+ controller.enqueue({ type: "text-start", id: textId });
605
+ isActiveText = true;
606
+ }
607
+ if (isActiveReasoning) {
608
+ controller.enqueue({
609
+ type: "reasoning-end",
610
+ id: reasoningId
611
+ });
612
+ isActiveReasoning = false;
613
+ }
614
+ controller.enqueue({
615
+ type: "text-delta",
616
+ id: textId,
617
+ delta: delta.content
618
+ });
619
+ }
620
+ if (delta.tool_calls != null) {
621
+ if (isActiveReasoning) {
622
+ controller.enqueue({
623
+ type: "reasoning-end",
624
+ id: reasoningId
625
+ });
626
+ isActiveReasoning = false;
627
+ }
628
+ for (const toolCallDelta of delta.tool_calls) {
629
+ const index = toolCallDelta.index;
630
+ if (toolCalls[index] == null) {
631
+ if (toolCallDelta.id == null) {
632
+ throw new InvalidResponseDataError({
633
+ data: toolCallDelta,
634
+ message: `Expected 'id' to be a string.`
635
+ });
636
+ }
637
+ if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
638
+ throw new InvalidResponseDataError({
639
+ data: toolCallDelta,
640
+ message: `Expected 'function.name' to be a string.`
641
+ });
642
+ }
643
+ controller.enqueue({
644
+ type: "tool-input-start",
645
+ id: toolCallDelta.id,
646
+ toolName: toolCallDelta.function.name
647
+ });
648
+ toolCalls[index] = {
649
+ id: toolCallDelta.id,
650
+ type: "function",
651
+ function: {
652
+ name: toolCallDelta.function.name,
653
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
654
+ },
655
+ hasFinished: false
656
+ };
657
+ const toolCall2 = toolCalls[index];
658
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
659
+ if (toolCall2.function.arguments.length > 0) {
660
+ controller.enqueue({
661
+ type: "tool-input-delta",
662
+ id: toolCall2.id,
663
+ delta: toolCall2.function.arguments
664
+ });
665
+ }
666
+ if (isParsableJson(toolCall2.function.arguments)) {
667
+ controller.enqueue({
668
+ type: "tool-input-end",
669
+ id: toolCall2.id
670
+ });
671
+ controller.enqueue({
672
+ type: "tool-call",
673
+ toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
674
+ toolName: toolCall2.function.name,
675
+ input: toolCall2.function.arguments
676
+ });
677
+ toolCall2.hasFinished = true;
678
+ }
679
+ }
680
+ continue;
681
+ }
682
+ const toolCall = toolCalls[index];
683
+ if (toolCall.hasFinished) {
684
+ continue;
685
+ }
686
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
687
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
688
+ }
689
+ controller.enqueue({
690
+ type: "tool-input-delta",
691
+ id: toolCall.id,
692
+ delta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
693
+ });
694
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
695
+ controller.enqueue({
696
+ type: "tool-input-end",
697
+ id: toolCall.id
698
+ });
699
+ controller.enqueue({
700
+ type: "tool-call",
701
+ toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
702
+ toolName: toolCall.function.name,
703
+ input: toolCall.function.arguments
704
+ });
705
+ toolCall.hasFinished = true;
706
+ }
707
+ }
708
+ }
709
+ },
710
+ flush(controller) {
711
+ var _a;
712
+ if (isActiveReasoning) {
713
+ controller.enqueue({ type: "reasoning-end", id: reasoningId });
714
+ }
715
+ if (isActiveText) {
716
+ controller.enqueue({ type: "text-end", id: textId });
717
+ }
718
+ for (const toolCall of toolCalls.filter((toolCall2) => !toolCall2.hasFinished)) {
719
+ controller.enqueue({
720
+ type: "tool-input-end",
721
+ id: toolCall.id
722
+ });
723
+ controller.enqueue({
724
+ type: "tool-call",
725
+ toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
726
+ toolName: toolCall.function.name,
727
+ input: toolCall.function.arguments
728
+ });
729
+ }
730
+ controller.enqueue({
731
+ type: "finish",
732
+ finishReason,
733
+ usage: convertQwenChatUsage(usage),
734
+ providerMetadata: {
735
+ qwen: {}
736
+ }
737
+ });
738
+ }
739
+ })
740
+ ),
741
+ request: { body },
742
+ response: { headers: responseHeaders }
743
+ };
744
+ }
745
+ };
746
+
747
+ // src/completion/qwen-completion-language-model.ts
748
+ import {
749
+ combineHeaders as combineHeaders2,
750
+ createEventSourceResponseHandler as createEventSourceResponseHandler2,
751
+ createJsonResponseHandler as createJsonResponseHandler2,
752
+ postJsonToApi as postJsonToApi2
753
+ } from "@ai-sdk/provider-utils";
754
+
755
+ // src/completion/convert-qwen-completion-usage.ts
756
+ function convertQwenCompletionUsage(usage) {
757
+ var _a, _b, _c, _d;
758
+ if (usage == null) {
759
+ return {
760
+ inputTokens: {
761
+ total: void 0,
762
+ noCache: void 0,
763
+ cacheRead: void 0,
764
+ cacheWrite: void 0
765
+ },
766
+ outputTokens: {
767
+ total: void 0,
768
+ text: void 0,
769
+ reasoning: void 0
770
+ },
771
+ raw: void 0
772
+ };
773
+ }
774
+ const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
775
+ const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
776
+ return {
777
+ inputTokens: {
778
+ total: (_c = usage.prompt_tokens) != null ? _c : void 0,
779
+ noCache: promptTokens,
780
+ cacheRead: void 0,
781
+ cacheWrite: void 0
782
+ },
783
+ outputTokens: {
784
+ total: (_d = usage.completion_tokens) != null ? _d : void 0,
785
+ text: completionTokens,
786
+ reasoning: void 0
787
+ },
788
+ raw: usage
789
+ };
790
+ }
791
+
792
+ // src/completion/convert-to-qwen-completion-prompt.ts
793
+ import { InvalidPromptError, UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider";
794
+ function convertToQwenCompletionPrompt({
795
+ prompt,
796
+ user = "user",
797
+ assistant = "assistant"
798
+ }) {
799
+ let text = "";
800
+ if (prompt[0].role === "system") {
801
+ text += `${prompt[0].content}
802
+
803
+ `;
804
+ prompt = prompt.slice(1);
805
+ }
806
+ for (const { role, content } of prompt) {
807
+ switch (role) {
808
+ case "system": {
809
+ throw new InvalidPromptError({
810
+ message: `Unexpected system message in prompt: ${content}`,
811
+ prompt
812
+ });
813
+ }
814
+ case "user": {
815
+ const userMessage = content.filter((part) => part.type === "text").map((part) => part.text).filter(Boolean).join("");
816
+ text += `${user}:
817
+ ${userMessage}
818
+
819
+ `;
820
+ break;
821
+ }
822
+ case "assistant": {
823
+ const assistantMessage = content.map((part) => {
824
+ switch (part.type) {
825
+ case "text": {
826
+ return part.text;
827
+ }
828
+ case "tool-call": {
829
+ throw new UnsupportedFunctionalityError3({
830
+ functionality: "tool-call messages"
831
+ });
832
+ }
833
+ default: {
834
+ return "";
835
+ }
836
+ }
837
+ }).join("");
838
+ text += `${assistant}:
839
+ ${assistantMessage}
840
+
841
+ `;
842
+ break;
843
+ }
844
+ case "tool": {
845
+ throw new UnsupportedFunctionalityError3({
846
+ functionality: "tool messages"
847
+ });
848
+ }
849
+ default: {
850
+ const _exhaustiveCheck = role;
851
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
852
+ }
853
+ }
854
+ }
855
+ text += `${assistant}:
856
+ `;
857
+ return {
858
+ prompt: text,
859
+ stopSequences: [`
860
+ ${user}:`]
861
+ };
862
+ }
863
+
864
+ // src/completion/qwen-completion-api.ts
865
+ import { lazySchema as lazySchema2, zodSchema as zodSchema2 } from "@ai-sdk/provider-utils";
866
+ import { z as z3 } from "zod";
867
+ var qwenCompletionResponseSchema = lazySchema2(
868
+ () => zodSchema2(
869
+ z3.object({
870
+ id: z3.string().nullish(),
871
+ created: z3.number().nullish(),
872
+ model: z3.string().nullish(),
873
+ choices: z3.array(
874
+ z3.object({
875
+ text: z3.string(),
876
+ finish_reason: z3.string().nullish()
877
+ })
878
+ ),
879
+ usage: z3.object({
880
+ prompt_tokens: z3.number(),
881
+ completion_tokens: z3.number(),
882
+ total_tokens: z3.number()
883
+ }).nullish()
884
+ })
885
+ )
886
+ );
887
+ var qwenCompletionChunkSchema = lazySchema2(
888
+ () => zodSchema2(
889
+ z3.union([
890
+ z3.object({
891
+ id: z3.string().nullish(),
892
+ created: z3.number().nullish(),
893
+ model: z3.string().nullish(),
894
+ choices: z3.array(
895
+ z3.object({
896
+ text: z3.string(),
897
+ finish_reason: z3.string().nullish(),
898
+ index: z3.number()
899
+ })
900
+ ),
901
+ usage: z3.object({
902
+ prompt_tokens: z3.number(),
903
+ completion_tokens: z3.number(),
904
+ total_tokens: z3.number()
905
+ }).nullish()
906
+ }),
907
+ qwenErrorDataSchema
908
+ ])
909
+ )
910
+ );
911
+
912
+ // src/completion/qwen-completion-language-model.ts
913
+ var QwenCompletionLanguageModel = class {
914
+ constructor(modelId, config) {
915
+ this.specificationVersion = "v3";
916
+ this.supportedUrls = {
917
+ // No URLs are supported for completion models.
918
+ };
919
+ this.modelId = modelId;
920
+ this.config = config;
921
+ }
922
+ get provider() {
923
+ return this.config.provider;
924
+ }
925
+ async getArgs({
926
+ prompt,
927
+ maxOutputTokens,
928
+ temperature,
929
+ topP,
930
+ topK,
931
+ presencePenalty,
932
+ stopSequences: userStopSequences,
933
+ responseFormat,
934
+ tools,
935
+ toolChoice,
936
+ seed
937
+ }) {
938
+ const warnings = [];
939
+ if (topK != null) {
940
+ warnings.push({ type: "unsupported", feature: "topK" });
941
+ }
942
+ if (tools == null ? void 0 : tools.length) {
943
+ warnings.push({ type: "unsupported", feature: "tools" });
944
+ }
945
+ if (toolChoice != null) {
946
+ warnings.push({ type: "unsupported", feature: "toolChoice" });
947
+ }
948
+ if (responseFormat != null && responseFormat.type !== "text") {
949
+ warnings.push({
950
+ type: "unsupported",
951
+ feature: "responseFormat",
952
+ details: "JSON response format is not supported."
953
+ });
954
+ }
955
+ const { prompt: completionPrompt, stopSequences } = convertToQwenCompletionPrompt({ prompt });
956
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
957
+ const args = {
958
+ // model id:
959
+ model: this.modelId,
960
+ // standardized settings:
961
+ max_tokens: maxOutputTokens,
962
+ temperature,
963
+ top_p: topP,
964
+ presence_penalty: presencePenalty,
965
+ seed,
966
+ // prompt:
967
+ prompt: completionPrompt,
968
+ // stop sequences:
969
+ stop: stop.length > 0 ? stop : void 0
970
+ };
971
+ return { args, warnings };
972
+ }
973
+ async doGenerate(options) {
974
+ var _a;
975
+ const { args, warnings } = await this.getArgs(options);
976
+ const {
977
+ responseHeaders,
978
+ value: response,
979
+ rawValue: rawResponse
980
+ } = await postJsonToApi2({
981
+ url: this.config.url({
982
+ path: "/completions",
983
+ modelId: this.modelId
984
+ }),
985
+ headers: combineHeaders2(this.config.headers(), options.headers),
986
+ body: args,
987
+ failedResponseHandler: qwenFailedResponseHandler,
988
+ successfulResponseHandler: createJsonResponseHandler2(qwenCompletionResponseSchema),
989
+ abortSignal: options.abortSignal,
990
+ fetch: this.config.fetch
991
+ });
992
+ const choice = response.choices[0];
993
+ const providerMetadata = { qwen: {} };
994
+ return {
995
+ content: [{ type: "text", text: choice.text }],
996
+ usage: convertQwenCompletionUsage(response.usage),
997
+ finishReason: {
998
+ unified: mapQwenFinishReason(choice.finish_reason),
999
+ raw: (_a = choice.finish_reason) != null ? _a : void 0
1000
+ },
1001
+ request: { body: args },
1002
+ response: {
1003
+ ...getResponseMetadata(response),
1004
+ headers: responseHeaders,
1005
+ body: rawResponse
1006
+ },
1007
+ providerMetadata,
1008
+ warnings
1009
+ };
1010
+ }
1011
+ async doStream(options) {
1012
+ const { args, warnings } = await this.getArgs(options);
1013
+ const body = {
1014
+ ...args,
1015
+ stream: true,
1016
+ stream_options: {
1017
+ include_usage: true
1018
+ }
1019
+ };
1020
+ const { responseHeaders, value: response } = await postJsonToApi2({
1021
+ url: this.config.url({
1022
+ path: "/completions",
1023
+ modelId: this.modelId
1024
+ }),
1025
+ headers: combineHeaders2(this.config.headers(), options.headers),
1026
+ body,
1027
+ failedResponseHandler: qwenFailedResponseHandler,
1028
+ successfulResponseHandler: createEventSourceResponseHandler2(qwenCompletionChunkSchema),
1029
+ abortSignal: options.abortSignal,
1030
+ fetch: this.config.fetch
1031
+ });
1032
+ let finishReason = {
1033
+ unified: "other",
1034
+ raw: void 0
1035
+ };
1036
+ const providerMetadata = { qwen: {} };
1037
+ let usage;
1038
+ let isFirstChunk = true;
1039
+ return {
1040
+ stream: response.pipeThrough(
1041
+ new TransformStream({
1042
+ start(controller) {
1043
+ controller.enqueue({ type: "stream-start", warnings });
1044
+ },
1045
+ transform(chunk, controller) {
1046
+ if (options.includeRawChunks) {
1047
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1048
+ }
1049
+ if (!chunk.success) {
1050
+ finishReason = { unified: "error", raw: void 0 };
1051
+ controller.enqueue({ type: "error", error: chunk.error });
1052
+ return;
1053
+ }
1054
+ const value = chunk.value;
1055
+ if ("object" in value) {
1056
+ finishReason = { unified: "error", raw: void 0 };
1057
+ controller.enqueue({ type: "error", error: value.message });
1058
+ return;
1059
+ }
1060
+ if (isFirstChunk) {
1061
+ isFirstChunk = false;
1062
+ controller.enqueue({
1063
+ type: "response-metadata",
1064
+ ...getResponseMetadata(value)
1065
+ });
1066
+ controller.enqueue({ type: "text-start", id: "0" });
1067
+ }
1068
+ if (value.usage != null) {
1069
+ usage = value.usage;
1070
+ }
1071
+ const choice = value.choices[0];
1072
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1073
+ finishReason = {
1074
+ unified: mapQwenFinishReason(choice.finish_reason),
1075
+ raw: choice.finish_reason
1076
+ };
1077
+ }
1078
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
1079
+ controller.enqueue({
1080
+ type: "text-delta",
1081
+ id: "0",
1082
+ delta: choice.text
1083
+ });
1084
+ }
1085
+ },
1086
+ flush(controller) {
1087
+ if (!isFirstChunk) {
1088
+ controller.enqueue({ type: "text-end", id: "0" });
1089
+ }
1090
+ controller.enqueue({
1091
+ type: "finish",
1092
+ finishReason,
1093
+ providerMetadata,
1094
+ usage: convertQwenCompletionUsage(usage)
1095
+ });
1096
+ }
1097
+ })
1098
+ ),
1099
+ request: { body },
1100
+ response: { headers: responseHeaders }
1101
+ };
1102
+ }
1103
+ };
1104
+
1105
+ // src/embedding/qwen-embedding-model.ts
1106
+ import { TooManyEmbeddingValuesForCallError } from "@ai-sdk/provider";
1107
+ import {
1108
+ combineHeaders as combineHeaders3,
1109
+ createJsonResponseHandler as createJsonResponseHandler3,
1110
+ parseProviderOptions,
1111
+ postJsonToApi as postJsonToApi3
1112
+ } from "@ai-sdk/provider-utils";
1113
+
1114
+ // src/embedding/qwen-embedding-api.ts
1115
+ import { lazySchema as lazySchema3, zodSchema as zodSchema3 } from "@ai-sdk/provider-utils";
1116
+ import { z as z4 } from "zod";
1117
+ var qwenTextEmbeddingResponseSchema = lazySchema3(
1118
+ () => zodSchema3(
1119
+ z4.object({
1120
+ data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
1121
+ usage: z4.object({ prompt_tokens: z4.number() }).nullish()
1122
+ })
1123
+ )
1124
+ );
1125
+
1126
+ // src/embedding/qwen-embedding-options.ts
1127
+ import { lazySchema as lazySchema4, zodSchema as zodSchema4 } from "@ai-sdk/provider-utils";
1128
+ import z5 from "zod";
1129
+ var qwenEmbeddingProviderOptions = lazySchema4(
1130
+ () => zodSchema4(
1131
+ z5.object({
1132
+ /**
1133
+ The number of dimensions the resulting output embeddings should have.
1134
+ Only supported in text-embedding-3 and later models.
1135
+ */
1136
+ dimensions: z5.number().optional()
1137
+ })
1138
+ )
1139
+ );
1140
+
1141
+ // src/embedding/qwen-embedding-model.ts
1142
+ var QwenEmbeddingModel = class {
1143
+ constructor(modelId, config) {
1144
+ this.specificationVersion = "v3";
1145
+ this.maxEmbeddingsPerCall = 2048;
1146
+ this.supportsParallelCalls = true;
1147
+ this.modelId = modelId;
1148
+ this.config = config;
1149
+ }
1150
+ get provider() {
1151
+ return this.config.provider;
1152
+ }
1153
+ async doEmbed({
1154
+ values,
1155
+ headers,
1156
+ abortSignal,
1157
+ providerOptions
1158
+ }) {
1159
+ var _a;
1160
+ if (values.length > this.maxEmbeddingsPerCall) {
1161
+ throw new TooManyEmbeddingValuesForCallError({
1162
+ provider: this.provider,
1163
+ modelId: this.modelId,
1164
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1165
+ values
1166
+ });
1167
+ }
1168
+ const qwenOptions = (_a = await parseProviderOptions({
1169
+ provider: "qwen",
1170
+ providerOptions,
1171
+ schema: qwenEmbeddingProviderOptions
1172
+ })) != null ? _a : {};
1173
+ const {
1174
+ responseHeaders,
1175
+ value: response,
1176
+ rawValue
1177
+ } = await postJsonToApi3({
1178
+ url: this.config.url({
1179
+ path: "/embeddings",
1180
+ modelId: this.modelId
1181
+ }),
1182
+ headers: combineHeaders3(this.config.headers(), headers),
1183
+ body: {
1184
+ model: this.modelId,
1185
+ input: values,
1186
+ encoding_format: "float",
1187
+ dimensions: qwenOptions.dimensions
1188
+ },
1189
+ failedResponseHandler: qwenFailedResponseHandler,
1190
+ successfulResponseHandler: createJsonResponseHandler3(qwenTextEmbeddingResponseSchema),
1191
+ abortSignal,
1192
+ fetch: this.config.fetch
1193
+ });
1194
+ return {
1195
+ warnings: [],
1196
+ embeddings: response.data.map((item) => item.embedding),
1197
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1198
+ response: { headers: responseHeaders, body: rawValue }
1199
+ };
1200
+ }
1201
+ };
1202
+
1203
+ // src/qwen-provider.ts
1204
+ function createQwen(options) {
1205
+ var _a;
1206
+ const baseURL = (_a = withoutTrailingSlash(options == null ? void 0 : options.baseURL)) != null ? _a : "https://dashscope.aliyuncs.com/compatible-mode/v1";
1207
+ const getHeaders = () => {
1208
+ const apiKey = loadApiKey({
1209
+ apiKey: options == null ? void 0 : options.apiKey,
1210
+ environmentVariableName: "DASHSCOPE_API_KEY",
1211
+ description: "Qwen API key"
1212
+ });
1213
+ return withUserAgentSuffix(
1214
+ {
1215
+ Authorization: `Bearer ${apiKey}`,
1216
+ ...options == null ? void 0 : options.headers
1217
+ },
1218
+ `ai-sdk/qwen/${VERSION}`
1219
+ );
1220
+ };
1221
+ const createChatModel = (modelId) => {
1222
+ return new QwenChatLanguageModel(modelId, {
1223
+ provider: "qwen.chat",
1224
+ baseURL,
1225
+ headers: getHeaders,
1226
+ fetch: options == null ? void 0 : options.fetch,
1227
+ url: ({ path }) => `${baseURL}${path}`
1228
+ });
1229
+ };
1230
+ const createCompletionModel = (modelId) => {
1231
+ return new QwenCompletionLanguageModel(modelId, {
1232
+ provider: `qwen.completion`,
1233
+ url: ({ path }) => `${baseURL}${path}`,
1234
+ headers: getHeaders,
1235
+ fetch: options == null ? void 0 : options.fetch
1236
+ });
1237
+ };
1238
+ const createEmbeddingModel = (modelId) => {
1239
+ return new QwenEmbeddingModel(modelId, {
1240
+ provider: `qwen.embedding`,
1241
+ url: ({ path }) => `${baseURL}${path}`,
1242
+ headers: getHeaders,
1243
+ fetch: options == null ? void 0 : options.fetch
1244
+ });
1245
+ };
1246
+ const provider = (modelId) => {
1247
+ if (new.target) {
1248
+ throw new Error("The Qwen model function cannot be called with the new keyword.");
1249
+ }
1250
+ return createChatModel(modelId);
1251
+ };
1252
+ provider.specificationVersion = "v3";
1253
+ provider.languageModel = createChatModel;
1254
+ provider.chat = createChatModel;
1255
+ provider.completion = createCompletionModel;
1256
+ provider.embedding = createEmbeddingModel;
1257
+ provider.embeddingModel = createEmbeddingModel;
1258
+ provider.textEmbedding = createEmbeddingModel;
1259
+ provider.textEmbeddingModel = createEmbeddingModel;
1260
+ provider.imageModel = (modelId) => {
1261
+ throw new NoSuchModelError({ modelId, modelType: "imageModel" });
1262
+ };
1263
+ return provider;
1264
+ }
1265
+ var qwen = createQwen();
1266
+
1267
+ // src/version.ts
1268
+ var VERSION2 = true ? "0.0.1" : "0.0.0-test";
1269
+ export {
1270
+ VERSION2 as VERSION,
1271
+ createQwen,
1272
+ qwen
1273
+ };
1274
+ //# sourceMappingURL=index.mjs.map
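
For reference, a minimal usage sketch of the API this bundle exports (createQwen and the default qwen instance). It relies only on what is visible in the diff above: the DashScope-compatible default baseURL, the DASHSCOPE_API_KEY fallback, and the chat/completion/embedding factories. The model ids below are illustrative placeholders, not values taken from the package.

import { createQwen, qwen } from "@untiny/qwen-ai-provider";

// Default instance: reads the key from DASHSCOPE_API_KEY and targets
// https://dashscope.aliyuncs.com/compatible-mode/v1.
const chatModel = qwen("qwen-plus"); // equivalent to qwen.chat("qwen-plus")

// Custom instance with explicit options (all fields are optional).
const provider = createQwen({
  apiKey: process.env.DASHSCOPE_API_KEY,
  baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1",
});
const completionModel = provider.completion("qwen-plus");
const embeddingModel = provider.textEmbeddingModel("text-embedding-v3");
// Note: provider.imageModel() throws NoSuchModelError; image models are not supported.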