opencode-litellm-plugin 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,44 @@
1
+ {
2
+ "name": "opencode-litellm-plugin",
3
+ "version": "1.0.0",
4
+ "description": "OpenCode plugin for LiteLLM — auto-discovers models, manages multiple servers, and supports reasoning variants",
5
+ "main": "dist/index.js",
6
+ "types": "dist/index.d.ts",
7
+ "author": "Dan Jeffries",
8
+ "license": "MIT",
9
+ "repository": {
10
+ "type": "git",
11
+ "url": "git+https://github.com/dan-jeff/opencode-litellm-provider.git"
12
+ },
13
+ "bugs": {
14
+ "url": "https://github.com/dan-jeff/opencode-litellm-provider/issues"
15
+ },
16
+ "homepage": "https://github.com/dan-jeff/opencode-litellm-provider#readme",
17
+ "keywords": [
18
+ "opencode",
19
+ "litellm",
20
+ "plugin",
21
+ "llm",
22
+ "ai",
23
+ "provider"
24
+ ],
25
+ "files": [
26
+ "dist/index.js",
27
+ "src/",
28
+ "LICENSE"
29
+ ],
30
+ "scripts": {
31
+ "build": "rm -rf dist && npx --yes bun build ./src/index.ts --outfile=./dist/index.js --target=bun",
32
+ "deploy": "node -e \"const fs = require('fs'); const os = require('os'); const path = require('path'); const targetDir = path.join(os.homedir(), '.config', 'opencode', 'plugins'); fs.mkdirSync(targetDir, { recursive: true }); fs.copyFileSync(path.join('dist', 'index.js'), path.join(targetDir, 'litellm.js'));\"",
33
+ "test": "bun test"
34
+ },
35
+ "dependencies": {
36
+ "@opencode-ai/plugin": "latest",
37
+ "zod": "latest"
38
+ },
39
+ "devDependencies": {
40
+ "typescript": "latest",
41
+ "@types/node": "latest",
42
+ "bun-types": "latest"
43
+ }
44
+ }
package/src/client.ts ADDED
@@ -0,0 +1,99 @@
1
/**
 * Normalized description of a single model exposed by a LiteLLM server,
 * built from `/v1/models` and (when available) `/model/info`.
 */
export interface LiteLLMModel {
  /** Model identifier as reported by the server's `/v1/models` endpoint. */
  id: string;
  /** Human-readable name; falls back to `id` when the server provides none. */
  name: string;
  /** Maximum input context size in tokens (defaulted when unknown). */
  contextWindow: number;
  /** Maximum number of output tokens (defaulted when unknown). */
  maxOutputTokens: number;
  /** Whether the model accepts image input. */
  supportsVision: boolean;
  /** Whether the model supports function/tool calling. */
  supportsToolCalls: boolean;
  /** Whether the model supports reasoning (see fetchModels for how this is derived). */
  supportsReasoning: boolean;
  /** Upstream provider name from LiteLLM's model info, or null when unknown. */
  litellmProvider: string | null;
  /** OpenAI-compatible parameters the model accepts (e.g. 'reasoning_effort'). */
  supportedParams: string[];
}
12
+
13
+ export function normalizeUrl(url: string): string {
14
+ let normalized = url.trim();
15
+ if (normalized.endsWith('/')) {
16
+ normalized = normalized.slice(0, -1);
17
+ }
18
+ if (normalized.endsWith('/v1')) {
19
+ normalized = normalized.slice(0, -3);
20
+ }
21
+ return normalized;
22
+ }
23
+
24
+ /**
25
+ * Fetches available models from the LiteLLM server via /v1/models,
26
+ * then enriches with capability data from /model/info when available.
27
+ */
28
+ export async function fetchModels(url: string, key: string): Promise<LiteLLMModel[]> {
29
+ const baseUrl = normalizeUrl(url);
30
+ const headers = {
31
+ 'Authorization': `Bearer ${key}`,
32
+ 'Content-Type': 'application/json',
33
+ };
34
+
35
+ // Fetch the model list
36
+ const response = await fetch(`${baseUrl}/v1/models`, {
37
+ method: 'GET',
38
+ headers,
39
+ });
40
+
41
+ if (!response.ok) {
42
+ let errorMessage = `Failed to fetch models: ${response.status} ${response.statusText}`;
43
+ try {
44
+ const errorData = await response.json();
45
+ if (errorData?.error) {
46
+ errorMessage += `. ${errorData.error.message || JSON.stringify(errorData.error)}`;
47
+ }
48
+ } catch (_) {}
49
+ throw new Error(errorMessage);
50
+ }
51
+
52
+ const data = await response.json();
53
+ if (!data || !Array.isArray(data.data)) {
54
+ return [];
55
+ }
56
+
57
+ // Try to fetch detailed model info for capabilities
58
+ let modelInfo: Record<string, any> = {};
59
+ try {
60
+ const infoResponse = await fetch(`${baseUrl}/model/info`, {
61
+ method: 'GET',
62
+ headers,
63
+ });
64
+ if (infoResponse.ok) {
65
+ const infoData = await infoResponse.json();
66
+ if (infoData?.data && Array.isArray(infoData.data)) {
67
+ for (const entry of infoData.data) {
68
+ const info = entry.model_info || {};
69
+ const id = entry.model_name || info.id;
70
+ if (id) modelInfo[id] = info;
71
+ }
72
+ }
73
+ }
74
+ } catch (_) {
75
+ // /model/info is optional — fall back to conservative defaults
76
+ }
77
+
78
+ return data.data.map((model: any) => {
79
+ const info = modelInfo[model.id] || {};
80
+ const params: string[] = info.supported_openai_params || [];
81
+
82
+ // supports_reasoning is often null even for reasoning models;
83
+ // check if reasoning_effort is an accepted param as a stronger signal
84
+ const reasoning = info.supports_reasoning === true
85
+ || params.includes('reasoning_effort');
86
+
87
+ return {
88
+ id: model.id,
89
+ name: model.name || model.id,
90
+ contextWindow: info.max_input_tokens || info.max_tokens || model.context_window || 128000,
91
+ maxOutputTokens: info.max_output_tokens || 4096,
92
+ supportsVision: info.supports_vision ?? false,
93
+ supportsToolCalls: info.supports_function_calling ?? true,
94
+ supportsReasoning: reasoning,
95
+ litellmProvider: info.litellm_provider || null,
96
+ supportedParams: params,
97
+ };
98
+ });
99
+ }
package/src/config.ts ADDED
@@ -0,0 +1,80 @@
1
+ import * as fs from 'fs';
2
+ import * as os from 'os';
3
+ import * as path from 'path';
4
+
5
/** One saved LiteLLM server connection, persisted under the `litellm` key of auth.json. */
export interface LiteLLMConfig {
  /** User-chosen unique name for this server (used as the config lookup key). */
  alias: string;
  /** Server base URL as entered; normalized by callers when building requests. */
  url: string;
  /** API key for the server. */
  key: string;
}
10
+
11
// Config lives alongside opencode's own files: ~/.config/opencode/auth.json
const CONFIG_DIR = path.join(os.homedir(), '.config', 'opencode');
const CONFIG_FILE = path.join(CONFIG_DIR, 'auth.json');

/**
 * Guarantees CONFIG_FILE exists and contains a `litellm` array, creating the
 * directory/file or injecting the key into existing JSON as needed.
 *
 * NOTE(review): if the existing file fails to parse, it is replaced wholesale
 * with `{ litellm: [] }` — this would also discard any non-litellm keys other
 * tools may keep in auth.json. Confirm that destructive recovery is intended.
 */
function ensureConfigExists(): void {
  if (!fs.existsSync(CONFIG_DIR)) {
    fs.mkdirSync(CONFIG_DIR, { recursive: true });
  }
  if (!fs.existsSync(CONFIG_FILE)) {
    fs.writeFileSync(CONFIG_FILE, JSON.stringify({ litellm: [] }, null, 2));
  } else {
    try {
      const data = fs.readFileSync(CONFIG_FILE, 'utf-8');
      const json = JSON.parse(data);
      // File exists but has no litellm section yet — add it in place.
      if (!json.litellm) {
        json.litellm = [];
        fs.writeFileSync(CONFIG_FILE, JSON.stringify(json, null, 2));
      }
    } catch (error) {
      // Unparseable file: reset to a minimal valid config (see NOTE above).
      fs.writeFileSync(CONFIG_FILE, JSON.stringify({ litellm: [] }, null, 2));
    }
  }
}
33
+
34
+ export function getConfigs(): LiteLLMConfig[] {
35
+ ensureConfigExists();
36
+ try {
37
+ const data = fs.readFileSync(CONFIG_FILE, 'utf-8');
38
+ const json = JSON.parse(data);
39
+ return json.litellm || [];
40
+ } catch (error) {
41
+ return [];
42
+ }
43
+ }
44
+
45
+ export function addConfig(config: LiteLLMConfig): void {
46
+ ensureConfigExists();
47
+ try {
48
+ const data = fs.readFileSync(CONFIG_FILE, 'utf-8');
49
+ const json = JSON.parse(data);
50
+ const configs: LiteLLMConfig[] = json.litellm || [];
51
+
52
+ const index = configs.findIndex((c) => c.alias === config.alias);
53
+ if (index !== -1) {
54
+ configs[index] = config;
55
+ } else {
56
+ configs.push(config);
57
+ }
58
+
59
+ json.litellm = configs;
60
+ fs.writeFileSync(CONFIG_FILE, JSON.stringify(json, null, 2), 'utf-8');
61
+ } catch (error) {
62
+ console.error('Error adding configuration:', error);
63
+ }
64
+ }
65
+
66
+ export function removeConfig(alias: string): void {
67
+ ensureConfigExists();
68
+ try {
69
+ const data = fs.readFileSync(CONFIG_FILE, 'utf-8');
70
+ const json = JSON.parse(data);
71
+ const configs: LiteLLMConfig[] = json.litellm || [];
72
+
73
+ const filteredConfigs = configs.filter((c) => c.alias !== alias);
74
+
75
+ json.litellm = filteredConfigs;
76
+ fs.writeFileSync(CONFIG_FILE, JSON.stringify(json, null, 2), 'utf-8');
77
+ } catch (error) {
78
+ console.error('Error removing configuration:', error);
79
+ }
80
+ }
package/src/index.ts ADDED
@@ -0,0 +1,231 @@
1
+ import { Plugin, PluginInput } from '@opencode-ai/plugin';
2
+ import { getConfigs, addConfig, removeConfig, LiteLLMConfig } from './config';
3
+ import { fetchModels, normalizeUrl, LiteLLMModel } from './client';
4
+ import { createConnectTool } from './tools';
5
+
6
// Provider-specific reasoning effort levels, keyed by LiteLLM's
// litellm_provider name. Only openai/chatgpt expose the "xhigh" tier here;
// gemini, anthropic and deepseek stop at "high".
// NOTE(review): the "off" level is assumed to be accepted by these providers'
// reasoningEffort parameter — confirm against each provider's API.
const REASONING_VARIANTS: Record<string, Record<string, any>> = {
  openai: {
    off: { reasoningEffort: "off" },
    low: { reasoningEffort: "low" },
    medium: { reasoningEffort: "medium" },
    high: { reasoningEffort: "high" },
    xhigh: { reasoningEffort: "xhigh" },
  },
  chatgpt: {
    off: { reasoningEffort: "off" },
    low: { reasoningEffort: "low" },
    medium: { reasoningEffort: "medium" },
    high: { reasoningEffort: "high" },
    xhigh: { reasoningEffort: "xhigh" },
  },
  gemini: {
    off: { reasoningEffort: "off" },
    low: { reasoningEffort: "low" },
    medium: { reasoningEffort: "medium" },
    high: { reasoningEffort: "high" },
  },
  anthropic: {
    off: { reasoningEffort: "off" },
    low: { reasoningEffort: "low" },
    medium: { reasoningEffort: "medium" },
    high: { reasoningEffort: "high" },
  },
  deepseek: {
    off: { reasoningEffort: "off" },
    low: { reasoningEffort: "low" },
    medium: { reasoningEffort: "medium" },
    high: { reasoningEffort: "high" },
  },
};
41
+
42
// Conservative fallback variants for providers without a dedicated entry in
// REASONING_VARIANTS: no "off" or "xhigh", just the common three levels.
const DEFAULT_REASONING_VARIANTS: Record<string, any> = {
  low: { reasoningEffort: "low" },
  medium: { reasoningEffort: "medium" },
  high: { reasoningEffort: "high" },
};
47
+
48
+ function getReasoningVariants(m: LiteLLMModel): Record<string, any> {
49
+ if (m.litellmProvider && REASONING_VARIANTS[m.litellmProvider]) {
50
+ return REASONING_VARIANTS[m.litellmProvider];
51
+ }
52
+ return DEFAULT_REASONING_VARIANTS;
53
+ }
54
+
55
/**
 * OpenCode LiteLLM provider plugin entry point.
 *
 * Returns the plugin's hook object:
 *  - `config`: registers one provider per saved server, each with its models.
 *  - `auth`: interactive flow to add and verify a new server connection.
 *  - `tool`: exposes the `litellm_connect` management tool.
 */
export const litellmPlugin: Plugin = async (_ctx: PluginInput) => {
  console.log('[litellm] Plugin initializing...');

  // Cache for fetched models to avoid redundant API calls.
  // Keyed by server alias; shared with the connect tool so additions/removals
  // made through the tool are visible to the config hook immediately.
  const modelCache = new Map<string, LiteLLMModel[]>();

  return {
    /**
     * Config hook — dynamically registers providers and models from all
     * configured LiteLLM servers.
     */
    config: async (config: any) => {
      if (!config.provider) config.provider = {};

      const serverConfigs = getConfigs();

      for (const sc of serverConfigs) {
        // Each server becomes its own provider, e.g. "litellm-work".
        const providerID = `litellm-${sc.alias}`;
        const baseUrl = normalizeUrl(sc.url);

        // Fetch models if not cached; a fetch failure leaves `models` empty
        // and falls through to the placeholder entry below.
        let models: LiteLLMModel[] = modelCache.get(sc.alias) || [];
        if (models.length === 0) {
          try {
            console.log(`[litellm] Fetching models for ${sc.alias}...`);
            models = await fetchModels(sc.url, sc.key);
            modelCache.set(sc.alias, models);
          } catch (error) {
            console.error(`[litellm] Model fetch failed for ${sc.alias}:`, error);
          }
        }

        // LiteLLM is OpenAI-compatible, so route through the openai SDK.
        config.provider[providerID] = {
          id: providerID,
          name: `LiteLLM (${sc.alias})`,
          npm: "@ai-sdk/openai",
          api: "openai",
          options: {
            baseURL: `${baseUrl}/v1`,
            apiKey: sc.key,
          },
          models: {},
        };

        if (models.length > 0) {
          for (const m of models) {
            // Map the normalized LiteLLMModel onto opencode's model schema.
            const modelConfig: any = {
              id: m.id,
              name: m.name || m.id,
              limit: {
                context: m.contextWindow,
                output: m.maxOutputTokens,
              },
              capabilities: {
                temperature: true,
                reasoning: m.supportsReasoning,
                attachment: m.supportsVision,
                toolcall: m.supportsToolCalls,
                input: {
                  text: true,
                  image: m.supportsVision,
                  // NOTE(review): pdf support is inferred from vision support — confirm.
                  pdf: m.supportsVision,
                },
                output: { text: true },
              },
            };

            // Reasoning models get effort-level variants (off/low/.../xhigh).
            if (m.supportsReasoning) {
              modelConfig.variants = getReasoningVariants(m);
            }

            config.provider[providerID].models[m.id] = modelConfig;
          }
        } else {
          // Keep the provider visible with a placeholder so the user can see
          // the server is configured but unreachable or empty.
          config.provider[providerID].models["placeholder"] = {
            id: "placeholder",
            name: "No models found (check connection)",
            limit: { context: 4096, output: 4096 },
          };
        }
      }

      console.log(`[litellm] Config hook completed. Registered ${serverConfigs.length} providers.`);
    },

    /**
     * Auth hook — handles initial provider setup and credential verification.
     * Management (list, remove) lives in the litellm:connect tool.
     */
    auth: {
      provider: "litellm",
      methods: [
        {
          type: "api" as const,
          label: "Connect to LiteLLM",
          prompts: [
            {
              type: "text" as const,
              key: "alias",
              message: "Alias for this server (e.g. 'work', 'staging')",
              placeholder: "my-server",
              // Alias is used in provider IDs and as a config key, so restrict
              // it to a safe character set.
              validate(value: string) {
                if (!value || value.trim().length === 0) return "Alias is required";
                if (!/^[a-zA-Z0-9_-]+$/.test(value.trim()))
                  return "Alias must be alphanumeric (hyphens and underscores allowed)";
                return undefined;
              },
            },
            {
              type: "text" as const,
              key: "url",
              message: "LiteLLM base URL",
              placeholder: "https://litellm.example.com",
              validate(value: string) {
                if (!value || value.trim().length === 0) return "URL is required";
                try {
                  // Parse-only check; the value itself is stored untouched.
                  new URL(value.trim());
                } catch {
                  return "Must be a valid URL (e.g. https://litellm.example.com)";
                }
                return undefined;
              },
            },
            {
              type: "text" as const,
              key: "apiKey",
              message: "API key",
              placeholder: "sk-...",
              validate(value: string) {
                if (!value || value.trim().length === 0) return "API key is required";
                return undefined;
              },
            },
          ],
          // Verifies the credentials by listing models before persisting;
          // any fetch failure rejects the auth attempt.
          async authorize(inputs?: Record<string, string>) {
            const alias = inputs?.alias?.trim();
            const url = inputs?.url?.trim();
            const apiKey = inputs?.apiKey?.trim();

            if (!alias || !url || !apiKey) {
              return { type: "failed" as const };
            }

            try {
              console.log(`[litellm] Verifying connection for ${alias} at ${url}...`);
              const models = await fetchModels(url, apiKey);
              console.log(`[litellm] Verified ${alias}: ${models.length} models available.`);

              // Persist to our config and cache
              modelCache.set(alias, models);
              addConfig({ alias, url, key: apiKey });

              return {
                type: "success" as const,
                key: apiKey,
                provider: `litellm-${alias}`,
              };
            } catch (error: any) {
              console.error(`[litellm] Verification failed for ${alias}:`, error.message);
              return { type: "failed" as const };
            }
          },
        },
      ],
    },

    /**
     * Register management tools.
     */
    tool: {
      'litellm_connect': createConnectTool(modelCache),
    },
  };
};
package/src/tools.ts ADDED
@@ -0,0 +1,67 @@
1
+ import { tool, ToolDefinition } from '@opencode-ai/plugin';
2
+ import { fetchModels, LiteLLMModel } from './client';
3
+ import { addConfig, getConfigs, removeConfig } from './config';
4
+
5
+ const z = tool.schema;
6
+
7
+ /**
8
+ * Creates the LiteLLM connection tool with access to the model cache.
9
+ */
10
+ export const createConnectTool = (modelCache: Map<string, LiteLLMModel[]>): ToolDefinition => {
11
+ return tool({
12
+ description: 'Manage LiteLLM server connections',
13
+ args: {
14
+ action: z.enum(['add', 'list', 'remove']),
15
+ alias: z.string().optional(),
16
+ url: z.string().optional(),
17
+ key: z.string().optional(),
18
+ },
19
+ async execute(args) {
20
+ const action = args.action as string;
21
+ const alias = args.alias as string | undefined;
22
+ const url = args.url as string | undefined;
23
+ const key = args.key as string | undefined;
24
+
25
+ if (action === 'add') {
26
+ if (!alias || !url || !key) {
27
+ throw new Error('Missing required arguments for "add" action: alias, url, and key are required.');
28
+ }
29
+
30
+ // Test the connection by fetching models.
31
+ const models = await fetchModels(url, key);
32
+
33
+ // If successful, save the configuration.
34
+ addConfig({ alias, url, key });
35
+
36
+ // Update the cache so the config hook can use it immediately.
37
+ modelCache.set(alias, models);
38
+
39
+ return `Successfully connected to LiteLLM server "${alias}" with ${models.length} models available.`;
40
+ }
41
+
42
+ if (action === 'list') {
43
+ const configs = getConfigs();
44
+
45
+ if (configs.length === 0) {
46
+ return 'No LiteLLM servers configured.';
47
+ }
48
+
49
+ const list = configs.map((c) => `- ${c.alias}: ${c.url}`).join('\n');
50
+ return `Configured LiteLLM servers:\n${list}`;
51
+ }
52
+
53
+ if (action === 'remove') {
54
+ if (!alias) {
55
+ throw new Error('Missing required argument for "remove" action: alias is required.');
56
+ }
57
+
58
+ removeConfig(alias);
59
+ modelCache.delete(alias); // Clear the cache for this alias.
60
+
61
+ return `Successfully removed LiteLLM server "${alias}".`;
62
+ }
63
+
64
+ throw new Error(`Unsupported action: ${action}`);
65
+ },
66
+ });
67
+ };