@runpod/ai-sdk-provider 0.1.0

package/CHANGELOG.md ADDED
@@ -0,0 +1,10 @@
+ # @runpod/ai-sdk-provider
+
+ ## 0.1.0
+
+ ### Minor Changes
+
+ - 4fa63d7: first release of the RunPod provider for the AI SDK
+ - `generateText` support for two LLMs:
+   - deep-cogito/deep-cogito-v2-llama-70b
+   - qwen/qwen3-32b-awq
package/LICENSE ADDED
@@ -0,0 +1,13 @@
+ Copyright 2025 RunPod, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,154 @@
+ # RunPod AI SDK Provider
+
+ The **RunPod provider** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for [RunPod's](https://runpod.io) public endpoints.
+
+ ## Installation
+
+ ```bash
+ npm install @runpod/ai-sdk-provider
+ ```
+
+ ## Setup
+
+ The RunPod provider requires a RunPod API key. You can obtain one from the [RunPod console](https://runpod.io).
+
+ ### Environment Variable
+
+ Set your API key as an environment variable:
+
+ ```bash
+ export RUNPOD_API_KEY="your-api-key-here"
+ ```
+
+ ### Provider Instance
+
+ Import the provider:
+
+ ```ts
+ import { runpod } from "@runpod/ai-sdk-provider";
+ ```
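+
+ If you need to pass the API key explicitly, add custom headers, or supply a custom `fetch` implementation, you can create your own provider instance with `createRunPod`. A minimal sketch; every setting is an optional field of `RunPodProviderSettings`, and the header shown is only a placeholder:
+
+ ```ts
+ import { createRunPod } from "@runpod/ai-sdk-provider";
+
+ // Each setting is optional; apiKey falls back to the RUNPOD_API_KEY environment variable.
+ const runpodClient = createRunPod({
+   apiKey: process.env.RUNPOD_API_KEY,
+   headers: { "x-example-header": "demo" }, // placeholder header for illustration
+ });
+
+ const model = runpodClient("qwen/qwen3-32b-awq");
+ ```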
+
+ ## Supported Models
+
+ | Model ID | Description |
+ | ---------------------------------------- | -------------------------------------------------------------------- |
+ | `deep-cogito/deep-cogito-v2-llama-70b` | 70B parameter general-purpose LLM with advanced reasoning |
+ | `qwen/qwen3-32b-awq` | 32B parameter multilingual model with strong reasoning capabilities |
+
+ ## Usage Examples
+
+ ### Basic Text Generation
+
+ ```ts
+ import { runpod } from "@runpod/ai-sdk-provider";
+ import { generateText } from "ai";
+
+ const { text } = await generateText({
+   model: runpod("deep-cogito/deep-cogito-v2-llama-70b"),
+   prompt: "Write a Python function that sorts a list:",
+ });
+
+ console.log(text);
+ ```
+
+ ### Streaming
+
+ **Note**: Streaming is not yet supported by RunPod's public endpoints; support is in development.
+
+ ### Chat Conversations
+
+ ```ts
+ import { runpod } from "@runpod/ai-sdk-provider";
+ import { generateText } from "ai";
+
+ const { text } = await generateText({
+   model: runpod("deep-cogito/deep-cogito-v2-llama-70b"),
+   messages: [
+     { role: "system", content: "You are a helpful assistant." },
+     { role: "user", content: "What is the capital of France?" },
+   ],
+ });
+ ```
+
+ ### Function Calling
+
+ ```ts
+ import { runpod } from "@runpod/ai-sdk-provider";
+ import { generateText, tool } from "ai";
+ import { z } from "zod";
+
+ const { text, toolCalls } = await generateText({
+   model: runpod("deep-cogito/deep-cogito-v2-llama-70b"),
+   prompt: "What is the weather like in San Francisco?",
+   tools: {
+     getWeather: tool({
+       description: "Get weather information for a city",
+       parameters: z.object({
+         city: z.string().describe("The city name"),
+       }),
+       execute: async ({ city }) => {
+         // Your weather API call here
+         return `The weather in ${city} is sunny.`;
+       },
+     }),
+   },
+ });
+ ```
+
+ ### Structured Output
+
+ ```ts
+ import { runpod } from "@runpod/ai-sdk-provider";
+ import { generateObject } from "ai";
+ import { z } from "zod";
+
+ const { object } = await generateObject({
+   model: runpod("qwen/qwen3-32b-awq"),
+   schema: z.object({
+     recipe: z.object({
+       name: z.string(),
+       ingredients: z.array(z.string()),
+       steps: z.array(z.string()),
+     }),
+   }),
+   prompt: "Generate a recipe for chocolate chip cookies.",
+ });
+
+ console.log(object.recipe);
+ ```
+
+ ## Model Methods
+
+ The provider supports multiple ways to create models:
+
+ ```ts
+ // Default chat model
+ const model1 = runpod("deep-cogito/deep-cogito-v2-llama-70b");
+
+ // Explicit chat model
+ const model2 = runpod.chatModel("deep-cogito/deep-cogito-v2-llama-70b");
+
+ // Language model (alias for chat)
+ const model3 = runpod.languageModel("qwen/qwen3-32b-awq");
+
+ // Completion model
+ const model4 = runpod.completionModel("deep-cogito/deep-cogito-v2-llama-70b");
+ ```
+
+ ## API Compatibility
+
+ RunPod's endpoints are OpenAI API compatible, supporting:
+
+ - Chat completions (`/chat/completions`)
+ - Text completions (`/completions`)
+ - Function/tool calling
+ - Structured outputs
+
+ **Note**: Streaming responses are not yet supported; support is in development.
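+
+ Because the endpoints speak the OpenAI wire format, you can also call them directly. The sketch below sends a raw chat-completions request for `qwen/qwen3-32b-awq`; the base URL and upstream model name are the ones the provider maps this model ID to internally, and the response handling is deliberately simplified:
+
+ ```ts
+ // Raw request against the same OpenAI-compatible endpoint the provider targets.
+ const response = await fetch(
+   "https://api.runpod.ai/v2/qwen3-32b-awq/openai/v1/chat/completions",
+   {
+     method: "POST",
+     headers: {
+       Authorization: `Bearer ${process.env.RUNPOD_API_KEY}`,
+       "Content-Type": "application/json",
+     },
+     body: JSON.stringify({
+       model: "Qwen/Qwen3-32B-AWQ", // upstream name the provider maps this model ID to
+       messages: [{ role: "user", content: "Hello!" }],
+     }),
+   }
+ );
+
+ const data = await response.json();
+ console.log(data.choices[0].message.content);
+ ```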
+
+ ## Links
+
+ - [RunPod](https://runpod.io) - Cloud platform for AI compute
+ - [RunPod Public Endpoints Documentation](https://docs.runpod.io/hub/public-endpoints)
+ - [AI SDK Documentation](https://ai-sdk.dev/docs)
+ - [GitHub Repository](https://github.com/runpod/ai-sdk-provider)
@@ -0,0 +1,45 @@
+ import { LanguageModelV2 } from '@ai-sdk/provider';
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+ export { OpenAICompatibleErrorData as RunPodErrorData } from '@ai-sdk/openai-compatible';
+
+ type RunPodChatModelId = 'deep-cogito/deep-cogito-v2-llama-70b' | 'qwen/qwen3-32b-awq' | (string & {});
+
+ type RunPodCompletionModelId = 'deep-cogito/deep-cogito-v2-llama-70b' | 'qwen/qwen3-32b-awq' | (string & {});
+
+ interface RunPodProviderSettings {
+   /**
+   RunPod API key.
+   */
+   apiKey?: string;
+   /**
+   Custom headers to include in the requests.
+   */
+   headers?: Record<string, string>;
+   /**
+   Custom fetch implementation. You can use it as a middleware to intercept requests,
+   or to provide a custom fetch implementation for e.g. testing.
+   */
+   fetch?: FetchFunction;
+ }
+ interface RunPodProvider {
+   /**
+   Creates a model for text generation.
+   */
+   (modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a chat model for text generation.
+   */
+   chatModel(modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a chat model for text generation.
+   */
+   languageModel(modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a completion model for text generation.
+   */
+   completionModel(modelId: RunPodCompletionModelId): LanguageModelV2;
+ }
+ declare function createRunPod(options?: RunPodProviderSettings): RunPodProvider;
+ declare const runpod: RunPodProvider;
+
+ export { type RunPodChatModelId, type RunPodCompletionModelId, type RunPodProvider, type RunPodProviderSettings, createRunPod, runpod };
@@ -0,0 +1,45 @@
+ import { LanguageModelV2 } from '@ai-sdk/provider';
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+ export { OpenAICompatibleErrorData as RunPodErrorData } from '@ai-sdk/openai-compatible';
+
+ type RunPodChatModelId = 'deep-cogito/deep-cogito-v2-llama-70b' | 'qwen/qwen3-32b-awq' | (string & {});
+
+ type RunPodCompletionModelId = 'deep-cogito/deep-cogito-v2-llama-70b' | 'qwen/qwen3-32b-awq' | (string & {});
+
+ interface RunPodProviderSettings {
+   /**
+   RunPod API key.
+   */
+   apiKey?: string;
+   /**
+   Custom headers to include in the requests.
+   */
+   headers?: Record<string, string>;
+   /**
+   Custom fetch implementation. You can use it as a middleware to intercept requests,
+   or to provide a custom fetch implementation for e.g. testing.
+   */
+   fetch?: FetchFunction;
+ }
+ interface RunPodProvider {
+   /**
+   Creates a model for text generation.
+   */
+   (modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a chat model for text generation.
+   */
+   chatModel(modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a chat model for text generation.
+   */
+   languageModel(modelId: RunPodChatModelId): LanguageModelV2;
+   /**
+   Creates a completion model for text generation.
+   */
+   completionModel(modelId: RunPodCompletionModelId): LanguageModelV2;
+ }
+ declare function createRunPod(options?: RunPodProviderSettings): RunPodProvider;
+ declare const runpod: RunPodProvider;
+
+ export { type RunPodChatModelId, type RunPodCompletionModelId, type RunPodProvider, type RunPodProviderSettings, createRunPod, runpod };
package/dist/index.js ADDED
@@ -0,0 +1,90 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+   createRunPod: () => createRunPod,
+   runpod: () => runpod
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/runpod-provider.ts
+ var import_openai_compatible = require("@ai-sdk/openai-compatible");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ var MODEL_ID_TO_ENDPOINT_URL = {
+   "deep-cogito/deep-cogito-v2-llama-70b": "https://api.runpod.ai/v2/deep-cogito-v2-llama-70b/openai/v1",
+   "qwen/qwen3-32b-awq": "https://api.runpod.ai/v2/qwen3-32b-awq/openai/v1"
+ };
+ var MODEL_ID_TO_OPENAI_NAME = {
+   "deep-cogito/deep-cogito-v2-llama-70b": "deepcogito/cogito-v2-preview-llama-70B",
+   "qwen/qwen3-32b-awq": "Qwen/Qwen3-32B-AWQ"
+ };
+ function createRunPod(options = {}) {
+   const getHeaders = () => ({
+     Authorization: `Bearer ${(0, import_provider_utils.loadApiKey)({
+       apiKey: options.apiKey,
+       environmentVariableName: "RUNPOD_API_KEY",
+       description: "RunPod"
+     })}`,
+     ...options.headers
+   });
+   const getModelConfig = (modelId, modelType) => {
+     const baseURL = MODEL_ID_TO_ENDPOINT_URL[modelId];
+     if (!baseURL) {
+       throw new Error(
+         `Unsupported RunPod model: ${modelId}. Supported models: ${Object.keys(
+           MODEL_ID_TO_ENDPOINT_URL
+         ).join(", ")}`
+       );
+     }
+     return {
+       provider: `runpod.${modelType}`,
+       url: ({ path }) => `${(0, import_provider_utils.withoutTrailingSlash)(baseURL)}${path}`,
+       headers: getHeaders,
+       fetch: options.fetch
+     };
+   };
+   const createChatModel = (modelId) => {
+     const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;
+     return new import_openai_compatible.OpenAICompatibleChatLanguageModel(
+       openaiModelName,
+       getModelConfig(modelId, "chat")
+     );
+   };
+   const createCompletionModel = (modelId) => {
+     const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;
+     return new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
+       openaiModelName,
+       getModelConfig(modelId, "completion")
+     );
+   };
+   const provider = (modelId) => createChatModel(modelId);
+   provider.completionModel = createCompletionModel;
+   provider.languageModel = createChatModel;
+   provider.chatModel = createChatModel;
+   return provider;
+ }
+ var runpod = createRunPod();
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   createRunPod,
+   runpod
+ });
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/index.ts","../src/runpod-provider.ts"],"sourcesContent":["export { createRunPod, runpod } from './runpod-provider';\nexport type { RunPodProvider, RunPodProviderSettings } from './runpod-provider';\nexport type { RunPodChatModelId } from './runpod-chat-options';\nexport type { RunPodCompletionModelId } from './runpod-completion-options';\nexport type { OpenAICompatibleErrorData as RunPodErrorData } from '@ai-sdk/openai-compatible';\n","import { LanguageModelV2 } from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { RunPodChatModelId } from './runpod-chat-options';\nimport { RunPodCompletionModelId } from './runpod-completion-options';\n\nexport interface RunPodProviderSettings {\n /**\nRunPod API key.\n*/\n apiKey?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface RunPodProvider {\n /**\nCreates a model for text generation.\n*/\n (modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: RunPodCompletionModelId): LanguageModelV2;\n}\n\n// Mapping of RunPod model IDs to their endpoint URLs\nconst MODEL_ID_TO_ENDPOINT_URL: Record<string, string> = {\n 'deep-cogito/deep-cogito-v2-llama-70b':\n 'https://api.runpod.ai/v2/deep-cogito-v2-llama-70b/openai/v1',\n 'qwen/qwen3-32b-awq': 'https://api.runpod.ai/v2/qwen3-32b-awq/openai/v1',\n};\n\n// Mapping of RunPod model IDs to their OpenAI model names\nconst MODEL_ID_TO_OPENAI_NAME: Record<string, string> = {\n 'deep-cogito/deep-cogito-v2-llama-70b':\n 'deepcogito/cogito-v2-preview-llama-70B',\n 'qwen/qwen3-32b-awq': 'Qwen/Qwen3-32B-AWQ',\n};\n\nexport function createRunPod(\n options: RunPodProviderSettings = {}\n): RunPodProvider {\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'RUNPOD_API_KEY',\n description: 'RunPod',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getModelConfig = (\n modelId: string,\n modelType: string\n ): CommonModelConfig => {\n const baseURL = MODEL_ID_TO_ENDPOINT_URL[modelId];\n if (!baseURL) {\n throw new Error(\n `Unsupported RunPod model: ${modelId}. 
Supported models: ${Object.keys(\n MODEL_ID_TO_ENDPOINT_URL\n ).join(', ')}`\n );\n }\n\n return {\n provider: `runpod.${modelType}`,\n url: ({ path }) => `${withoutTrailingSlash(baseURL)}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n };\n };\n\n const createChatModel = (modelId: RunPodChatModelId) => {\n const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;\n return new OpenAICompatibleChatLanguageModel(\n openaiModelName,\n getModelConfig(modelId, 'chat')\n );\n };\n\n const createCompletionModel = (modelId: RunPodCompletionModelId) => {\n const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;\n return new OpenAICompatibleCompletionLanguageModel(\n openaiModelName,\n getModelConfig(modelId, 'completion')\n );\n };\n\n const provider = (modelId: RunPodChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n\n return provider;\n}\n\nexport const runpod = createRunPod();\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACCA,+BAGO;AACP,4BAIO;AA2CP,IAAM,2BAAmD;AAAA,EACvD,wCACE;AAAA,EACF,sBAAsB;AACxB;AAGA,IAAM,0BAAkD;AAAA,EACtD,wCACE;AAAA,EACF,sBAAsB;AACxB;AAEO,SAAS,aACd,UAAkC,CAAC,GACnB;AAChB,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,cAAU,kCAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,iBAAiB,CACrB,SACA,cACsB;AACtB,UAAM,UAAU,yBAAyB,OAAO;AAChD,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI;AAAA,QACR,6BAA6B,OAAO,uBAAuB,OAAO;AAAA,UAChE;AAAA,QACF,EAAE,KAAK,IAAI,CAAC;AAAA,MACd;AAAA,IACF;AAEA,WAAO;AAAA,MACL,UAAU,UAAU,SAAS;AAAA,MAC7B,KAAK,CAAC,EAAE,KAAK,MAAM,OAAG,4CAAqB,OAAO,CAAC,GAAG,IAAI;AAAA,MAC1D,SAAS;AAAA,MACT,OAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAEA,QAAM,kBAAkB,CAAC,YAA+B;AACtD,UAAM,kBAAkB,wBAAwB,OAAO,KAAK;AAC5D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,eAAe,SAAS,MAAM;AAAA,IAChC;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAAqC;AAClE,UAAM,kBAAkB,wBAAwB,OAAO,KAAK;AAC5D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,eAAe,SAAS,YAAY;AAAA,IACtC;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAA+B,gBAAgB,OAAO;AAExE,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AAErB,SAAO;AACT;AAEO,IAAM,SAAS,aAAa;","names":[]}
package/dist/index.mjs ADDED
@@ -0,0 +1,68 @@
+ // src/runpod-provider.ts
+ import {
+   OpenAICompatibleChatLanguageModel,
+   OpenAICompatibleCompletionLanguageModel
+ } from "@ai-sdk/openai-compatible";
+ import {
+   loadApiKey,
+   withoutTrailingSlash
+ } from "@ai-sdk/provider-utils";
+ var MODEL_ID_TO_ENDPOINT_URL = {
+   "deep-cogito/deep-cogito-v2-llama-70b": "https://api.runpod.ai/v2/deep-cogito-v2-llama-70b/openai/v1",
+   "qwen/qwen3-32b-awq": "https://api.runpod.ai/v2/qwen3-32b-awq/openai/v1"
+ };
+ var MODEL_ID_TO_OPENAI_NAME = {
+   "deep-cogito/deep-cogito-v2-llama-70b": "deepcogito/cogito-v2-preview-llama-70B",
+   "qwen/qwen3-32b-awq": "Qwen/Qwen3-32B-AWQ"
+ };
+ function createRunPod(options = {}) {
+   const getHeaders = () => ({
+     Authorization: `Bearer ${loadApiKey({
+       apiKey: options.apiKey,
+       environmentVariableName: "RUNPOD_API_KEY",
+       description: "RunPod"
+     })}`,
+     ...options.headers
+   });
+   const getModelConfig = (modelId, modelType) => {
+     const baseURL = MODEL_ID_TO_ENDPOINT_URL[modelId];
+     if (!baseURL) {
+       throw new Error(
+         `Unsupported RunPod model: ${modelId}. Supported models: ${Object.keys(
+           MODEL_ID_TO_ENDPOINT_URL
+         ).join(", ")}`
+       );
+     }
+     return {
+       provider: `runpod.${modelType}`,
+       url: ({ path }) => `${withoutTrailingSlash(baseURL)}${path}`,
+       headers: getHeaders,
+       fetch: options.fetch
+     };
+   };
+   const createChatModel = (modelId) => {
+     const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;
+     return new OpenAICompatibleChatLanguageModel(
+       openaiModelName,
+       getModelConfig(modelId, "chat")
+     );
+   };
+   const createCompletionModel = (modelId) => {
+     const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;
+     return new OpenAICompatibleCompletionLanguageModel(
+       openaiModelName,
+       getModelConfig(modelId, "completion")
+     );
+   };
+   const provider = (modelId) => createChatModel(modelId);
+   provider.completionModel = createCompletionModel;
+   provider.languageModel = createChatModel;
+   provider.chatModel = createChatModel;
+   return provider;
+ }
+ var runpod = createRunPod();
+ export {
+   createRunPod,
+   runpod
+ };
+ //# sourceMappingURL=index.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/runpod-provider.ts"],"sourcesContent":["import { LanguageModelV2 } from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { RunPodChatModelId } from './runpod-chat-options';\nimport { RunPodCompletionModelId } from './runpod-completion-options';\n\nexport interface RunPodProviderSettings {\n /**\nRunPod API key.\n*/\n apiKey?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface RunPodProvider {\n /**\nCreates a model for text generation.\n*/\n (modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: RunPodChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: RunPodCompletionModelId): LanguageModelV2;\n}\n\n// Mapping of RunPod model IDs to their endpoint URLs\nconst MODEL_ID_TO_ENDPOINT_URL: Record<string, string> = {\n 'deep-cogito/deep-cogito-v2-llama-70b':\n 'https://api.runpod.ai/v2/deep-cogito-v2-llama-70b/openai/v1',\n 'qwen/qwen3-32b-awq': 'https://api.runpod.ai/v2/qwen3-32b-awq/openai/v1',\n};\n\n// Mapping of RunPod model IDs to their OpenAI model names\nconst MODEL_ID_TO_OPENAI_NAME: Record<string, string> = {\n 'deep-cogito/deep-cogito-v2-llama-70b':\n 'deepcogito/cogito-v2-preview-llama-70B',\n 'qwen/qwen3-32b-awq': 'Qwen/Qwen3-32B-AWQ',\n};\n\nexport function createRunPod(\n options: RunPodProviderSettings = {}\n): RunPodProvider {\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'RUNPOD_API_KEY',\n description: 'RunPod',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getModelConfig = (\n modelId: string,\n modelType: string\n ): CommonModelConfig => {\n const baseURL = MODEL_ID_TO_ENDPOINT_URL[modelId];\n if (!baseURL) {\n throw new Error(\n `Unsupported RunPod model: ${modelId}. 
Supported models: ${Object.keys(\n MODEL_ID_TO_ENDPOINT_URL\n ).join(', ')}`\n );\n }\n\n return {\n provider: `runpod.${modelType}`,\n url: ({ path }) => `${withoutTrailingSlash(baseURL)}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n };\n };\n\n const createChatModel = (modelId: RunPodChatModelId) => {\n const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;\n return new OpenAICompatibleChatLanguageModel(\n openaiModelName,\n getModelConfig(modelId, 'chat')\n );\n };\n\n const createCompletionModel = (modelId: RunPodCompletionModelId) => {\n const openaiModelName = MODEL_ID_TO_OPENAI_NAME[modelId] || modelId;\n return new OpenAICompatibleCompletionLanguageModel(\n openaiModelName,\n getModelConfig(modelId, 'completion')\n );\n };\n\n const provider = (modelId: RunPodChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n\n return provider;\n}\n\nexport const runpod = createRunPod();\n"],"mappings":";AACA;AAAA,EACE;AAAA,EACA;AAAA,OACK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,OACK;AA2CP,IAAM,2BAAmD;AAAA,EACvD,wCACE;AAAA,EACF,sBAAsB;AACxB;AAGA,IAAM,0BAAkD;AAAA,EACtD,wCACE;AAAA,EACF,sBAAsB;AACxB;AAEO,SAAS,aACd,UAAkC,CAAC,GACnB;AAChB,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,UAAU,WAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,iBAAiB,CACrB,SACA,cACsB;AACtB,UAAM,UAAU,yBAAyB,OAAO;AAChD,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI;AAAA,QACR,6BAA6B,OAAO,uBAAuB,OAAO;AAAA,UAChE;AAAA,QACF,EAAE,KAAK,IAAI,CAAC;AAAA,MACd;AAAA,IACF;AAEA,WAAO;AAAA,MACL,UAAU,UAAU,SAAS;AAAA,MAC7B,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,qBAAqB,OAAO,CAAC,GAAG,IAAI;AAAA,MAC1D,SAAS;AAAA,MACT,OAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAEA,QAAM,kBAAkB,CAAC,YAA+B;AACtD,UAAM,kBAAkB,wBAAwB,OAAO,KAAK;AAC5D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,eAAe,SAAS,MAAM;AAAA,IAChC;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAAqC;AAClE,UAAM,kBAAkB,wBAAwB,OAAO,KAAK;AAC5D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,eAAe,SAAS,YAAY;AAAA,IACtC;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAA+B,gBAAgB,OAAO;AAExE,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AAErB,SAAO;AACT;AAEO,IAAM,SAAS,aAAa;","names":[]}
package/package.json ADDED
@@ -0,0 +1,79 @@
+ {
+   "name": "@runpod/ai-sdk-provider",
+   "version": "0.1.0",
+   "license": "Apache-2.0",
+   "sideEffects": false,
+   "main": "./dist/index.js",
+   "module": "./dist/index.mjs",
+   "types": "./dist/index.d.ts",
+   "files": [
+     "dist/**/*",
+     "CHANGELOG.md"
+   ],
+   "exports": {
+     "./package.json": "./package.json",
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.mjs",
+       "require": "./dist/index.js"
+     }
+   },
+   "dependencies": {
+     "@ai-sdk/openai-compatible": "^1.0.7",
+     "@ai-sdk/provider": "^2.0.0",
+     "@ai-sdk/provider-utils": "^3.0.0"
+   },
+   "devDependencies": {
+     "@changesets/cli": "^2.29.6",
+     "@edge-runtime/vm": "^5.0.0",
+     "@types/node": "^20.0.0",
+     "@typescript-eslint/eslint-plugin": "^8.39.1",
+     "@typescript-eslint/parser": "^8.39.1",
+     "eslint": "^9.33.0",
+     "prettier": "^3.6.2",
+     "tsup": "^8.0.0",
+     "typescript": "^5.0.0",
+     "vitest": "^2.0.0",
+     "zod": "^3.25.0"
+   },
+   "peerDependencies": {
+     "zod": "^3.25.76 || ^4"
+   },
+   "engines": {
+     "node": ">=18"
+   },
+   "publishConfig": {
+     "access": "public"
+   },
+   "homepage": "https://runpod.io",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/runpod/ai-sdk-provider.git"
+   },
+   "bugs": {
+     "url": "https://github.com/runpod/ai-sdk-provider/issues"
+   },
+   "keywords": [
+     "ai",
+     "runpod",
+     "ai-sdk",
+     "llm",
+     "language-model"
+   ],
+   "scripts": {
+     "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
+     "build:watch": "pnpm clean && tsup --watch",
+     "clean": "rm -rf dist *.tsbuildinfo",
+     "lint": "eslint \"./**/*.ts*\"",
+     "type-check": "tsc --build",
+     "prettier-check": "prettier --check \"./**/*.ts*\"",
+     "test": "pnpm test:node && pnpm test:edge",
+     "test:update": "pnpm test:node -u",
+     "test:watch": "vitest --config vitest.node.config.js",
+     "test:edge": "vitest --config vitest.edge.config.js --run",
+     "test:node": "vitest --config vitest.node.config.js --run",
+     "changeset": "changeset",
+     "changeset:version": "changeset version",
+     "changeset:publish": "changeset publish"
+   }
+ }