opencode-pollinations-plugin 5.8.4-beta.14 → 5.8.4-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -99,16 +99,12 @@ export const PollinationsPlugin = async (ctx) => {
  // Dynamic Provider Name
  const version = require('../package.json').version;
  config.provider['pollinations'] = {
- id: 'openai',
+ id: 'pollinations',
  name: `Pollinations AI (v${version})`,
- options: {
- baseURL: localBaseUrl,
- apiKey: 'dummy-key', // Required by OpenAI provider
- },
+ options: { baseURL: localBaseUrl },
  models: modelsObj
  };
  log(`[Hook] Registered ${Object.keys(modelsObj).length} models.`);
- log(`[Hook] Keys: ${Object.keys(modelsObj).join(', ')}`);
  },
  ...toastHooks,
  ...createStatusHooks(ctx.client),
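(For orientation: after this hunk the provider entry the plugin hands to OpenCode reduces to roughly the sketch below. `localBaseUrl` and `modelsObj` come from earlier, unchanged parts of index.js and are stubbed here with illustrative values, not the plugin's actual defaults.)

```js
// Sketch of the registered provider shape after this change (stub values are assumptions).
const version = '5.8.4-beta.2'; // normally read from package.json
const localBaseUrl = 'http://127.0.0.1:3000/v1'; // hypothetical local proxy address
const modelsObj = { 'free/gemini': { name: '[Free] Gemini 2.5 Flash Lite' } }; // hypothetical

const provider = {
    id: 'pollinations',                 // was 'openai'
    name: `Pollinations AI (v${version})`,
    options: { baseURL: localBaseUrl }, // the 'dummy-key' apiKey is gone
    models: modelsObj
};
console.log(`[Hook] Registered ${Object.keys(provider.models).length} models.`);
```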
@@ -1,6 +1,8 @@
  interface OpenCodeModel {
  id: string;
  name: string;
+ object: string;
+ variants?: any;
  options?: any;
  limit?: {
  context?: number;
@@ -10,7 +12,6 @@ interface OpenCodeModel {
  input?: string[];
  output?: string[];
  };
- tool_call?: boolean;
  }
  export declare function generatePollinationsConfig(forceApiKey?: string, forceStrict?: boolean): Promise<OpenCodeModel[]>;
  export {};
@@ -7,8 +7,15 @@ const HOMEDIR = os.homedir();
  const CONFIG_DIR_POLLI = path.join(HOMEDIR, '.pollinations');
  const CACHE_FILE = path.join(CONFIG_DIR_POLLI, 'models-cache.json');
  // --- CONSTANTS ---
- // Seed from models-seed.ts
- import { FREE_MODELS_SEED } from './models-seed.js';
+ // Seed from _archs/debug_free.json
+ const DEFAULT_FREE_MODELS = [
+ { "name": "gemini", "description": "Gemini 2.5 Flash Lite", "tier": "anonymous", "tools": true, "input_modalities": ["text", "image"], "output_modalities": ["text"], "vision": true },
+ { "name": "mistral", "description": "Mistral Small 3.2 24B", "tier": "anonymous", "tools": true, "input_modalities": ["text"], "output_modalities": ["text"], "vision": false },
+ { "name": "openai-fast", "description": "GPT-OSS 20B Reasoning LLM (OVH)", "tier": "anonymous", "tools": true, "input_modalities": ["text"], "output_modalities": ["text"], "vision": false, "reasoning": true },
+ { "name": "bidara", "description": "BIDARA (Biomimetic Designer)", "tier": "anonymous", "community": true, "input_modalities": ["text", "image"], "output_modalities": ["text"], "vision": true },
+ { "name": "chickytutor", "description": "ChickyTutor AI Language Tutor", "tier": "anonymous", "community": true, "input_modalities": ["text"], "output_modalities": ["text"] },
+ { "name": "midijourney", "description": "MIDIjourney", "tier": "anonymous", "community": true, "input_modalities": ["text"], "output_modalities": ["text"] }
+ ];
  // --- LOGGING ---
  const LOG_FILE = '/tmp/opencode_pollinations_config.log';
  function log(msg) {
@@ -98,7 +105,7 @@ function saveCache(models, etag) {
  export async function generatePollinationsConfig(forceApiKey, forceStrict = false) {
  const config = loadConfig();
  const modelsOutput = [];
- log(`Starting Configuration (v5.8.4-Debug-Tools)...`);
+ log(`Starting Configuration (v5.8.4-Beta2)...`);
  const effectiveKey = forceApiKey || config.apiKey;
  // 1. FREE UNIVERSE (Smart Cache System)
  let freeModelsList = [];
@@ -150,7 +157,7 @@ export async function generatePollinationsConfig(forceApiKey, forceStrict = fals
  }
  else {
  log('Using DEFAULT SEED models (Offline + No Cache).');
- freeModelsList = FREE_MODELS_SEED;
+ freeModelsList = DEFAULT_FREE_MODELS;
  }
  }
  }
@@ -163,7 +170,7 @@ export async function generatePollinationsConfig(forceApiKey, forceStrict = fals
  // Tag (Offline) only if we explicitly failed a fetch attempt or are using Fallback SEED when fetch failed.
  // If we use cache because it's valid (Skipped fetch), we don't tag (Offline).
  const suffix = isOffline ? ' (Offline)' : '';
- const mapped = mapModel(m, 'free-', `[Free] `, suffix);
+ const mapped = mapModel(m, 'free/', `[Free] `, suffix);
  modelsOutput.push(mapped);
  });
  // 2. ENTERPRISE UNIVERSE
@@ -176,17 +183,16 @@ export async function generatePollinationsConfig(forceApiKey, forceStrict = fals
  enterList.forEach((m) => {
  if (m.tools === false)
  return;
- const mapped = mapModel(m, 'enter-', '[Enter] ');
+ const mapped = mapModel(m, 'enter/', '[Enter] ');
  modelsOutput.push(mapped);
  });
  log(`Total models (Free+Pro): ${modelsOutput.length}`);
- log(`Generated IDs: ${modelsOutput.map(m => m.id).join(', ')}`);
  }
  catch (e) {
  log(`Error fetching Enterprise models: ${e}`);
  if (forceStrict)
  throw e;
- // STRICT: No Fallback for Enterprise. If API is down, we have 0 Enter models.
+ modelsOutput.push({ id: "enter/gpt-4o", name: "[Enter] GPT-4o (Fallback)", object: "model", variants: {} });
  }
  }
  return modelsOutput;
@@ -234,16 +240,14 @@ function mapModel(raw, prefix, namePrefix, nameSuffix = '') {
  const modelObj = {
  id: fullId,
  name: finalName,
- // object: 'model',
- // variants: {}, // POTENTIAL SCHEMA VIOLATION
+ object: 'model',
+ variants: {},
  modalities: {
  input: raw.input_modalities || ['text'],
  output: raw.output_modalities || ['text']
- },
- tool_call: false // FORCE DEBUG DISABLE
+ }
  };
  // Enrichissements
- /*
  if (raw.reasoning === true || rawId.includes('thinking') || rawId.includes('reasoning')) {
  modelObj.variants = { ...modelObj.variants, high_reasoning: { options: { reasoningEffort: "high", budgetTokens: 16000 } } };
  }
@@ -255,30 +259,19 @@ function mapModel(raw, prefix, namePrefix, nameSuffix = '') {
  if (rawId.includes('claude') || rawId.includes('mistral') || rawId.includes('llama')) {
  modelObj.variants.safe_tokens = { options: { maxTokens: 8000 } };
  }
- */
  if (rawId.includes('nova')) {
- if (rawId.includes('nova')) {
- modelObj.limit = { output: 8000, context: 128000 };
- }
- if (rawId.includes('nomnom') || rawId.includes('scrape')) {
- modelObj.limit = { output: 2048, context: 32768 };
- }
- if (rawId.includes('chicky') || rawId.includes('mistral')) {
- modelObj.limit = { output: 4096, context: 8192 };
- modelObj.options = { ...modelObj.options, maxTokens: 4096 };
- log(`[LimitConfig] Applied strict limit to ${fullId}: output=4096, context=8192`);
- }
- /*
+ modelObj.limit = { output: 8000, context: 128000 };
+ }
+ if (rawId.includes('nomnom') || rawId.includes('scrape')) {
+ modelObj.limit = { output: 2048, context: 32768 };
+ }
+ if (rawId.includes('chicky')) {
+ modelObj.limit = { output: 8192, context: 8192 };
+ }
  if (rawId.includes('fast') || rawId.includes('flash')) {
  if (!rawId.includes('gemini')) {
  modelObj.variants.speed = { options: { thinking: { disabled: true } } };
  }
  }
- */
- }
- // DEBUG LIMITS
- if (modelObj.limit) {
- log(`[Model] ${modelObj.id} Limit: ${JSON.stringify(modelObj.limit)}`);
- }
  return modelObj;
  }
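(Tracing the new `mapModel` path with one of the seed entries gives roughly the object below. The `finalName` assembly happens outside this hunk, so the name shown is an assumption; the changes visible here are that `object` and `variants` are now always emitted, `tool_call: false` is gone, and the strict chicky/mistral limit block has been dropped.)

```js
// Sketch: approximate result of mapModel(seedGemini, 'free/', '[Free] ') after this diff.
const mapped = {
    id: 'free/gemini',                    // 'free/' prefix instead of the old 'free-'
    name: '[Free] Gemini 2.5 Flash Lite', // assumed: namePrefix + description
    object: 'model',
    variants: {},                         // reasoning/speed variants may still be added
    modalities: { input: ['text', 'image'], output: ['text'] }
    // no tool_call field, no forced output/context limits for this model
};
console.log(JSON.stringify(mapped, null, 2));
```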
@@ -153,11 +153,6 @@ export async function handleChatCompletion(req, res, bodyRaw) {
  const config = loadConfig();
  // DEBUG: Trace Config State for Hot Reload verification
  log(`[Proxy Request] Config Loaded. Mode: ${config.mode}, HasKey: ${!!config.apiKey}, KeyLength: ${config.apiKey ? config.apiKey.length : 0}`);
- // SPY LOGGING
- try {
- fs.appendFileSync('/tmp/opencode_spy.log', `\n\n=== REQUEST ${new Date().toISOString()} ===\nMODEL: ${body.model}\nBODY:\n${JSON.stringify(body, null, 2)}\n==========================\n`);
- }
- catch (e) { }
  // 0. COMMAND HANDLING
  if (body.messages && body.messages.length > 0) {
  const lastMsg = body.messages[body.messages.length - 1];
@@ -217,14 +212,13 @@ export async function handleChatCompletion(req, res, bodyRaw) {
  const { getQuotaStatus, formatQuotaForToast } = await import('./quota.js');
  const quota = await getQuotaStatus(false);
  // A. Resolve Base Target
- // A. Resolve Base Target
- if (actualModel.startsWith('enter/') || actualModel.startsWith('enter-')) {
+ if (actualModel.startsWith('enter/')) {
  isEnterprise = true;
- actualModel = actualModel.replace(/^enter[-/]/, '');
+ actualModel = actualModel.replace('enter/', '');
  }
- else if (actualModel.startsWith('free/') || actualModel.startsWith('free-')) {
+ else if (actualModel.startsWith('free/')) {
  isEnterprise = false;
- actualModel = actualModel.replace(/^free[-/]/, '');
+ actualModel = actualModel.replace('free/', '');
  }
  // A.1 PAID MODEL ENFORCEMENT (V5.5 Strategy)
  // Check dynamic list saved by generate-config.ts
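(Combined with the `free/` / `enter/` prefixes now produced by generate-config, the routing step above reduces to a plain prefix strip; the legacy `free-` / `enter-` forms are no longer recognised. A minimal standalone sketch of that behaviour, with a helper name of our own choosing:)

```js
// Sketch of the simplified prefix handling per this hunk (resolveTarget is not a plugin API).
function resolveTarget(requestedModel) {
    let actualModel = requestedModel;
    let isEnterprise = false;
    if (actualModel.startsWith('enter/')) {
        isEnterprise = true;
        actualModel = actualModel.replace('enter/', '');
    }
    else if (actualModel.startsWith('free/')) {
        isEnterprise = false;
        actualModel = actualModel.replace('free/', '');
    }
    return { actualModel, isEnterprise };
}
console.log(resolveTarget('free/gemini'));  // { actualModel: 'gemini', isEnterprise: false }
console.log(resolveTarget('enter/gpt-4o')); // { actualModel: 'gpt-4o', isEnterprise: true }
```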
@@ -422,32 +416,13 @@ export async function handleChatCompletion(req, res, bodyRaw) {
  // Restore Tools but REMOVE conflicting ones (Search)
  // B. GEMINI UNIFIED FIX (Free, Fast, Pro, Enterprise, Legacy)
  // Handles: "tools" vs "grounding" conflicts, and "infinite loops" via Stop Sequences.
- // GLOBAL BEDROCK FIX (All Models)
- // Check if history has tools but current request misses tools definition.
- // This happens when OpenCode sends the Tool Result (optimisation),
- // but Bedrock requires toolConfig to validate the history.
- const hasToolHistory = proxyBody.messages?.some((m) => m.role === 'tool' || m.tool_calls);
- if (hasToolHistory && (!proxyBody.tools || proxyBody.tools.length === 0)) {
- // Inject Shim Tool to satisfy Bedrock
- proxyBody.tools = [{
- type: 'function',
- function: {
- name: '_bedrock_compatibility_shim',
- description: 'Internal system tool to satisfy Bedrock strict toolConfig requirement. Do not use.',
- parameters: { type: 'object', properties: {} }
- }
- }];
- log(`[Proxy] Bedrock Fix: Injected shim tool for ${actualModel} (History has tools, Request missing tools)`);
- }
  // B. GEMINI UNIFIED FIX (Free, Fast, Pro, Enterprise, Legacy)
  // Fixes "Multiple tools" error (Vertex) and "JSON body validation failed" (v5.3.5 regression)
- // Added ChickyTutor (Claude/Gemini based) to fix "toolConfig must be defined" error.
- else if (actualModel.includes("gemini") || actualModel.includes("chickytutor")) {
+ else if (actualModel.includes("gemini")) {
  let hasFunctions = false;
  if (proxyBody.tools && Array.isArray(proxyBody.tools)) {
  hasFunctions = proxyBody.tools.some((t) => t.type === 'function' || t.function);
  }
- // Old Shim logic removed (moved up)
  if (hasFunctions) {
  // 1. Strict cleanup of 'google_search' tool
  proxyBody.tools = proxyBody.tools.filter((t) => {
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "opencode-pollinations-plugin",
  "displayName": "Pollinations AI (V5.6)",
- "version": "5.8.4-beta.14",
+ "version": "5.8.4-beta.2",
  "description": "Native Pollinations.ai Provider Plugin for OpenCode",
  "publisher": "pollinations",
  "repository": {
@@ -55,4 +55,4 @@
  "@types/node": "^20.0.0",
  "typescript": "^5.0.0"
  }
- }
+ }
@@ -1,18 +0,0 @@
- export interface PollinationsModel {
- name: string;
- description?: string;
- type?: string;
- tools?: boolean;
- reasoning?: boolean;
- context?: number;
- context_window?: number;
- input_modalities?: string[];
- output_modalities?: string[];
- paid_only?: boolean;
- vision?: boolean;
- audio?: boolean;
- community?: boolean;
- censored?: boolean;
- [key: string]: any;
- }
- export declare const FREE_MODELS_SEED: PollinationsModel[];
@@ -1,55 +0,0 @@
- export const FREE_MODELS_SEED = [
- {
- "name": "gemini",
- "description": "Gemini 2.5 Flash Lite",
- "tier": "anonymous",
- "tools": true,
- "input_modalities": ["text", "image"],
- "output_modalities": ["text"],
- "vision": true
- },
- {
- "name": "mistral",
- "description": "Mistral Small 3.2 24B",
- "tier": "anonymous",
- "tools": true,
- "input_modalities": ["text"],
- "output_modalities": ["text"],
- "vision": false
- },
- {
- "name": "openai-fast",
- "description": "GPT-OSS 20B Reasoning LLM (OVH)",
- "tier": "anonymous",
- "tools": true,
- "input_modalities": ["text"],
- "output_modalities": ["text"],
- "vision": false,
- "reasoning": true
- },
- {
- "name": "bidara",
- "description": "BIDARA (Biomimetic Designer)",
- "tier": "anonymous",
- "community": true,
- "input_modalities": ["text", "image"],
- "output_modalities": ["text"],
- "vision": true
- },
- {
- "name": "chickytutor",
- "description": "ChickyTutor AI Language Tutor",
- "tier": "anonymous",
- "community": true,
- "input_modalities": ["text"],
- "output_modalities": ["text"]
- },
- {
- "name": "midijourney",
- "description": "MIDIjourney",
- "tier": "anonymous",
- "community": true,
- "input_modalities": ["text"],
- "output_modalities": ["text"]
- }
- ];