opencode-pollinations-plugin 5.8.2 → 5.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,17 @@ import * as path from 'path';
  import { loadConfig } from './config.js';
  const HOMEDIR = os.homedir();
  const CONFIG_DIR_POLLI = path.join(HOMEDIR, '.pollinations');
- const CONFIG_FILE = path.join(CONFIG_DIR_POLLI, 'config.json');
+ const CACHE_FILE = path.join(CONFIG_DIR_POLLI, 'models-cache.json');
+ // --- CONSTANTS ---
+ // Seed from _archs/debug_free.json
+ const DEFAULT_FREE_MODELS = [
+ { "name": "gemini", "description": "Gemini 2.5 Flash Lite", "tier": "anonymous", "tools": true, "input_modalities": ["text", "image"], "output_modalities": ["text"], "vision": true },
+ { "name": "mistral", "description": "Mistral Small 3.2 24B", "tier": "anonymous", "tools": true, "input_modalities": ["text"], "output_modalities": ["text"], "vision": false },
+ { "name": "openai-fast", "description": "GPT-OSS 20B Reasoning LLM (OVH)", "tier": "anonymous", "tools": true, "input_modalities": ["text"], "output_modalities": ["text"], "vision": false, "reasoning": true },
+ { "name": "bidara", "description": "BIDARA (Biomimetic Designer)", "tier": "anonymous", "community": true, "input_modalities": ["text", "image"], "output_modalities": ["text"], "vision": true },
+ { "name": "chickytutor", "description": "ChickyTutor AI Language Tutor", "tier": "anonymous", "community": true, "input_modalities": ["text"], "output_modalities": ["text"] },
+ { "name": "midijourney", "description": "MIDIjourney", "tier": "anonymous", "community": true, "input_modalities": ["text"], "output_modalities": ["text"] }
+ ];
  // --- LOGGING ---
  const LOG_FILE = '/tmp/opencode_pollinations_config.log';
  function log(msg) {
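
For reference, the models-cache.json file introduced above (written by the new saveCache helper later in this diff) stores a millisecond timestamp plus the raw model list, and the generator treats it as stale after 7 days. A minimal sketch of inspecting that file from Node, assuming it exists and follows the shape shown in this diff:

// Illustrative only: reads the cache written by saveCache() further down in this diff.
// The { timestamp, models } shape and the 7-day TTL both come from this change.
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

const cachePath = path.join(os.homedir(), '.pollinations', 'models-cache.json');
if (fs.existsSync(cachePath)) {
  const { timestamp, models } = JSON.parse(fs.readFileSync(cachePath, 'utf-8'));
  const ageDays = (Date.now() - timestamp) / (24 * 3600 * 1000);
  console.log(`${models.length} cached models, ${ageDays.toFixed(1)} days old`);
  console.log(ageDays > 7 ? 'stale: the plugin will refetch' : 'fresh: the plugin will reuse it');
}
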
@@ -16,15 +26,13 @@ function log(msg) {
  fs.appendFileSync(LOG_FILE, `[ConfigGen] ${ts} ${msg}\n`);
  }
  catch (e) { }
- // Force output to stderr for CLI visibility if needed, but clean.
  }
- // Fetch Helper
- // Fetch Helper
+ // --- NETWORK HELPER ---
  function fetchJson(url, headers = {}) {
  return new Promise((resolve, reject) => {
  const finalHeaders = {
  ...headers,
- 'User-Agent': 'Mozilla/5.0 (compatible; OpenCode/5.8.1; +https://opencode.ai)'
+ 'User-Agent': 'Mozilla/5.0 (compatible; OpenCode/5.8.2; +https://opencode.ai)'
  };
  const req = https.get(url, { headers: finalHeaders }, (res) => {
  let data = '';
@@ -36,7 +44,7 @@ function fetchJson(url, headers = {}) {
  }
  catch (e) {
  log(`JSON Parse Error for ${url}: ${e}`);
- resolve([]); // Fail safe -> empty list
+ resolve([]); // Fail safe -> empty list to trigger fallback logic
  }
  });
  });
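
The updated comment spells out the contract this release leans on: when the response cannot be parsed, fetchJson resolves with an empty array rather than rejecting, and the caller treats an empty list as its cue to fall back. A hedged sketch of that caller-side pattern (loadFreeModels is a hypothetical helper; the endpoint URL is the one used later in this diff):

// Sketch of the caller-side contract assumed by this change: an empty array
// (parse failure or an empty API response) routes the caller to its fallback list.
async function loadFreeModels(fetchJson, fallbackModels) {
  const raw = await fetchJson('https://text.pollinations.ai/models');
  const list = Array.isArray(raw) ? raw : (raw.data || []);
  return list.length > 0 ? list : fallbackModels;
}
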
@@ -50,167 +58,145 @@ function fetchJson(url, headers = {}) {
  });
  });
  }
- function formatName(id, censored = true) {
- let clean = id.replace(/^pollinations\//, '').replace(/-/g, ' ');
- clean = clean.replace(/\b\w/g, l => l.toUpperCase());
- if (!censored)
- clean += " (Uncensored)";
- return clean;
+ // --- CACHE MANAGER ---
+ function loadCache() {
+ try {
+ if (fs.existsSync(CACHE_FILE)) {
+ const content = fs.readFileSync(CACHE_FILE, 'utf-8');
+ return JSON.parse(content);
+ }
+ }
+ catch (e) {
+ log(`Error loading cache: ${e}`);
+ }
+ return null;
  }
- // --- MAIN GENERATOR logic ---
+ function saveCache(models) {
+ try {
+ const data = {
+ timestamp: Date.now(),
+ models: models
+ };
+ if (!fs.existsSync(CONFIG_DIR_POLLI))
+ fs.mkdirSync(CONFIG_DIR_POLLI, { recursive: true });
+ fs.writeFileSync(CACHE_FILE, JSON.stringify(data, null, 2));
+ }
+ catch (e) {
+ log(`Error saving cache: ${e}`);
+ }
+ }
+ // --- GENERATOR LOGIC ---
  export async function generatePollinationsConfig(forceApiKey, forceStrict = false) {
  const config = loadConfig();
  const modelsOutput = [];
- log(`Starting Configuration (v5.8.2)...`);
- // Use forced key (from Hook) or cached key
+ log(`Starting Configuration (v5.8.2-Robust)...`);
  const effectiveKey = forceApiKey || config.apiKey;
- // 1. FREE UNIVERSE
- try {
- // Switch to main models endpoint
- const freeList = await fetchJson('https://text.pollinations.ai/models');
- const list = Array.isArray(freeList) ? freeList : (freeList.data || []);
- if (list.length > 0) {
- list.forEach((m) => {
- const mapped = mapModel(m, 'free/', '[Free] ');
- modelsOutput.push(mapped);
- });
- log(`Fetched ${modelsOutput.length} Free models.`);
+ // 1. FREE UNIVERSE (Cache System)
+ let freeModelsList = [];
+ let isOffline = false;
+ let cache = loadCache();
+ const CACHE_TTL = 7 * 24 * 3600 * 1000; // 7 days
+ // Decision: Fetch or Cache?
+ const now = Date.now();
+ let shouldFetch = !cache || (now - cache.timestamp > CACHE_TTL);
+ if (shouldFetch) {
+ log('Attempting to fetch fresh Free models...');
+ try {
+ const raw = await fetchJson('https://text.pollinations.ai/models');
+ const list = Array.isArray(raw) ? raw : (raw.data || []);
+ if (list.length > 0) {
+ freeModelsList = list;
+ saveCache(list);
+ log(`Fetched and cached ${list.length} models.`);
+ }
+ else {
+ throw new Error('API returned empty list');
+ }
  }
- else {
- throw new Error('Empty list returned from Free API');
+ catch (e) {
+ log(`Fetch failed: ${e}.`);
+ isOffline = true;
+ // Fallback to Cache or Default
+ if (cache && cache.models.length > 0) {
+ log('Using cached models (Offline).');
+ freeModelsList = cache.models;
+ }
+ else {
+ log('Using DEFAULT SEED models (Offline + No Cache).');
+ freeModelsList = DEFAULT_FREE_MODELS;
+ }
  }
  }
- catch (e) {
- log(`Error fetching Free models: ${e}`);
- // Fallback Robust (Offline support) - NOW WITH ICONS
- modelsOutput.push({
- id: "free/mistral",
- name: "[Free] Mistral Nemo (Fallback) 💻",
- object: "model",
- variants: { safe_tokens: { options: { maxTokens: 8000 } } },
- modalities: { input: ['text'], output: ['text'] }
- });
- modelsOutput.push({
- id: "free/openai",
- name: "[Free] OpenAI (Fallback) 👁️💻",
- object: "model",
- variants: {},
- modalities: { input: ['text', 'image'], output: ['text'] }
- });
- modelsOutput.push({
- id: "free/gemini",
- name: "[Free] Gemini Flash (Fallback) 👁️💻",
- object: "model",
- variants: { high_reasoning: { options: { reasoningEffort: "high", budgetTokens: 16000 } } },
- modalities: { input: ['text', 'image'], output: ['text'] }
- });
- modelsOutput.push({
- id: "free/searchgpt",
- name: "[Free] SearchGPT (Fallback) 🔍",
- object: "model",
- variants: {},
- modalities: { input: ['text'], output: ['text'] }
- });
- }
- // 1.5 FORCE ENSURE CRITICAL MODELS
- // Sometimes the API list changes or is cached weirdly. We force vital models.
- const hasGemini = modelsOutput.find(m => m.id === 'free/gemini');
- if (!hasGemini) {
- log(`[ConfigGen] Force-injecting free/gemini.`);
- modelsOutput.push({
- id: "free/gemini",
- name: "[Free] Gemini Flash (Force) 👁️💻",
- object: "model",
- variants: {},
- modalities: { input: ['text', 'image'], output: ['text'] }
- });
+ else {
+ log('Cache is recent. Using cached models.');
+ freeModelsList = cache.models;
  }
- // ALIAS Removed for Clean Config
- // const hasGeminiAlias = modelsOutput.find(m => m.id === 'pollinations/free/gemini');
- // if (!hasGeminiAlias) {
- // modelsOutput.push({ id: "pollinations/free/gemini", name: "[Free] Gemini Flash (Alias)", object: "model", variants: {} });
- // }
+ // Map Free Models
+ freeModelsList.forEach((m) => {
+ // Name suffix: the user asked for a marker appended to the end of model names when the
+ // list is degraded (fetch failed, or the cache is past its TTL). We only add it when a
+ // fetch was attempted and failed.
+ const suffix = isOffline ? ' (Offline)' : '';
+ const mapped = mapModel(m, 'free/', `[Free] `, suffix);
+ modelsOutput.push(mapped);
+ });
  // 2. ENTERPRISE UNIVERSE
  if (effectiveKey && effectiveKey.length > 5 && effectiveKey !== 'dummy') {
  try {
- // Use /text/models for full metadata (input_modalities, tools, reasoning, pricing)
  const enterListRaw = await fetchJson('https://gen.pollinations.ai/text/models', {
  'Authorization': `Bearer ${effectiveKey}`
  });
  const enterList = Array.isArray(enterListRaw) ? enterListRaw : (enterListRaw.data || []);
- const paidModels = [];
  enterList.forEach((m) => {
  if (m.tools === false)
  return;
  const mapped = mapModel(m, 'enter/', '[Enter] ');
  modelsOutput.push(mapped);
- if (m.paid_only) {
- paidModels.push(mapped.id.replace('enter/', '')); // Store bare ID "gemini-large"
- }
  });
  log(`Total models (Free+Pro): ${modelsOutput.length}`);
- // Save Paid Models List for Proxy
- try {
- const paidListPath = path.join(config.gui ? path.dirname(CONFIG_FILE) : '/tmp', 'pollinations-paid-models.json');
- // Ensure dir exists (re-use config dir logic from config.ts if possible, or just assume it exists since config loaded)
- if (fs.existsSync(path.dirname(paidListPath))) {
- fs.writeFileSync(paidListPath, JSON.stringify(paidModels));
- }
- }
- catch (e) {
- log(`Error saving paid models list: ${e}`);
- }
  }
  catch (e) {
  log(`Error fetching Enterprise models: ${e}`);
- // STRICT MODE (Validation): Do not return fake fallback models.
  if (forceStrict)
  throw e;
- // Fallback Robust for Enterprise (User has Key but discovery failed)
+ // Fallback Enter (could be cached too in future)
  modelsOutput.push({ id: "enter/gpt-4o", name: "[Enter] GPT-4o (Fallback)", object: "model", variants: {} });
- // ...
- modelsOutput.push({ id: "enter/claude-3-5-sonnet", name: "[Enter] Claude 3.5 Sonnet (Fallback)", object: "model", variants: {} });
- modelsOutput.push({ id: "enter/deepseek-reasoner", name: "[Enter] DeepSeek R1 (Fallback)", object: "model", variants: {} });
  }
  }
  return modelsOutput;
  }
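
Taken together, the free-model section above reduces to a three-tier decision. The sketch below is a condensed, illustrative restatement of that control flow; resolveFreeModels is a hypothetical helper, not code from the package:

// Condensed restatement of the free-model selection in generatePollinationsConfig():
// fresh cache -> reuse it; otherwise fetch and refresh the cache; if the fetch fails,
// fall back to the stale cache or, failing that, the DEFAULT_FREE_MODELS seed,
// and flag the run as offline so model names get an " (Offline)" suffix.
function resolveFreeModels({ cache, fetched, now = Date.now(), ttl = 7 * 24 * 3600 * 1000 }) {
  if (cache && now - cache.timestamp <= ttl) return { models: cache.models, offline: false };
  if (fetched && fetched.length > 0) return { models: fetched, offline: false };
  if (cache && cache.models.length > 0) return { models: cache.models, offline: true };
  return { models: DEFAULT_FREE_MODELS, offline: true };
}
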
- // --- CAPABILITY ICONS ---
+ // --- UTILS ---
  function getCapabilityIcons(raw) {
  const icons = [];
- // Vision: accepts images
- if (raw.input_modalities?.includes('image'))
+ if (raw.input_modalities?.includes('image') || raw.vision === true)
  icons.push('👁️');
- // Audio Input
- if (raw.input_modalities?.includes('audio'))
+ if (raw.input_modalities?.includes('audio') || raw.audio === true)
  icons.push('🎙️');
- // Audio Output
  if (raw.output_modalities?.includes('audio'))
  icons.push('🔊');
- // Reasoning capability
  if (raw.reasoning === true)
  icons.push('🧠');
- // Web Search (from description)
- if (raw.description?.toLowerCase().includes('search') ||
- raw.name?.includes('search') ||
- raw.name?.includes('perplexity')) {
+ if (raw.description?.toLowerCase().includes('search') || raw.name?.includes('search'))
  icons.push('🔍');
- }
- // Tool/Function calling
  if (raw.tools === true)
  icons.push('💻');
  return icons.length > 0 ? ` ${icons.join('')}` : '';
  }
- // --- MAPPING ENGINE ---
- function mapModel(raw, prefix, namePrefix) {
+ function formatName(id, censored = true) {
+ let clean = id.replace(/^pollinations\//, '').replace(/-/g, ' ');
+ clean = clean.replace(/\b\w/g, l => l.toUpperCase());
+ if (!censored)
+ clean += " (Uncensored)";
+ return clean;
+ }
+ function mapModel(raw, prefix, namePrefix, nameSuffix = '') {
  const rawId = raw.id || raw.name;
- const fullId = prefix + rawId; // ex: "free/gemini" or "enter/nomnom" (prefix passed is "enter/")
+ const fullId = prefix + rawId;
  let baseName = raw.description;
  if (!baseName || baseName === rawId) {
  baseName = formatName(rawId, raw.censored !== false);
  }
- // CLEANUP: Simple Truncation Rule (Requested by User)
- // "Start from left, find ' - ', delete everything after."
  if (baseName && baseName.includes(' - ')) {
  baseName = baseName.split(' - ')[0].trim();
  }
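
To make the naming helpers above concrete, here is roughly how they behave; the example ids and description are invented for illustration:

// Illustrative walk-through of the helpers above (input values are made up):
formatName('gpt-4o-mini', false);           // -> "Gpt 4o Mini (Uncensored)"
formatName('pollinations/deepseek', true);  // -> "Deepseek"
// Descriptions containing " - " keep only the left-hand part:
'DeepSeek R1 - Reasoning model'.split(' - ')[0].trim(); // -> "DeepSeek R1"
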
@@ -218,16 +204,19 @@ function mapModel(raw, prefix, namePrefix) {
  if (raw.paid_only) {
  namePrefixFinal = namePrefix.replace('[Enter]', '[💎 Paid]');
  }
- // Get capability icons from API metadata
  const capabilityIcons = getCapabilityIcons(raw);
- const finalName = `${namePrefixFinal}${baseName}${capabilityIcons}`;
+ const finalName = `${namePrefixFinal}${baseName}${nameSuffix}${capabilityIcons}`;
  const modelObj = {
  id: fullId,
  name: finalName,
  object: 'model',
- variants: {}
+ variants: {},
+ modalities: {
+ input: raw.input_modalities || ['text'],
+ output: raw.output_modalities || ['text']
+ }
  };
- // --- ENRICHISSEMENT ---
+ // Enrichments
  if (raw.reasoning === true || rawId.includes('thinking') || rawId.includes('reasoning')) {
  modelObj.variants = { ...modelObj.variants, high_reasoning: { options: { reasoningEffort: "high", budgetTokens: 16000 } } };
  }
@@ -239,25 +228,13 @@ function mapModel(raw, prefix, namePrefix) {
  if (rawId.includes('claude') || rawId.includes('mistral') || rawId.includes('llama')) {
  modelObj.variants.safe_tokens = { options: { maxTokens: 8000 } };
  }
- // NOVA FIX: Bedrock limit ~10k (User reported error > 10000)
- // We MUST set the limit on the model object itself so OpenCode respects it by default.
  if (rawId.includes('nova')) {
- modelObj.limit = {
- output: 8000,
- context: 128000 // Nova Micro/Lite/Pro usually 128k
- };
- // Also keep variant just in case
- modelObj.variants.bedrock_safe = { options: { maxTokens: 8000 } };
+ modelObj.limit = { output: 8000, context: 128000 };
  }
- // NOMNOM FIX: User reported error if max_tokens is missing.
- // Also it is a 'Gemini-scrape' model, so we treat it similar to Gemini but with strict limit.
  if (rawId.includes('nomnom') || rawId.includes('scrape')) {
- modelObj.limit = {
- output: 2048, // User used 1500 successfully
- context: 32768
- };
+ modelObj.limit = { output: 2048, context: 32768 };
  }
- if (rawId.includes('fast') || rawId.includes('flash') || rawId.includes('lite')) {
+ if (rawId.includes('fast') || rawId.includes('flash')) {
  if (!rawId.includes('gemini')) {
  modelObj.variants.speed = { options: { thinking: { disabled: true } } };
  }
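
As a concrete illustration of the new nameSuffix parameter and modalities field, this is roughly what mapModel would return for the first seed entry during an offline run. It is an expected value derived from the code in this diff, not captured output, and mapModel lines outside these hunks could add further variants:

// Approximate result of mapModel(DEFAULT_FREE_MODELS[0], 'free/', '[Free] ', ' (Offline)');
// icons follow from "vision": true (or "image" input) and "tools": true in the seed entry.
const expectedGemini = {
  id: 'free/gemini',
  name: '[Free] Gemini 2.5 Flash Lite (Offline) 👁️💻',
  object: 'model',
  variants: {},
  modalities: { input: ['text', 'image'], output: ['text'] }
};
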
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "opencode-pollinations-plugin",
  "displayName": "Pollinations AI (V5.6)",
- "version": "5.8.2",
+ "version": "5.8.3",
  "description": "Native Pollinations.ai Provider Plugin for OpenCode",
  "publisher": "pollinations",
  "repository": {