@iservu-inc/adf-cli 0.12.0 → 0.12.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +12 -6
- package/CHANGELOG.md +109 -0
- package/README.md +33 -14
- package/bin/adf.js +339 -1
- package/lib/ai/ai-client.js +161 -44
- package/lib/ai/ai-config.js +276 -104
- package/lib/commands/deploy.js +36 -8
- package/lib/generators/deepagent-generator.js +144 -0
- package/lib/generators/gemini-cli-generator.js +241 -0
- package/lib/generators/index.js +33 -0
- package/lib/generators/opencode-generator.js +153 -0
- package/package.json +1 -1
package/lib/ai/ai-client.js
CHANGED
@@ -97,8 +97,19 @@ class AIClient {
       ]
     });
 
+    // Validate response structure
+    if (!response.content || response.content.length === 0) {
+      throw new Error(`Anthropic model '${this.model}' returned no content.`);
+    }
+
+    const content = response.content[0].text;
+
+    if (!content || content.trim().length === 0) {
+      throw new Error(`Anthropic model '${this.model}' returned empty text.`);
+    }
+
     return {
-      content
+      content,
       model: this.model,
       provider: 'anthropic',
       usage: {
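
The new checks turn a silently empty Anthropic response into a thrown Error. A minimal caller-side sketch of how that surfaces, assuming the rest of the CLI reaches this client through `sendMessage()` (the `generate` helper and the option values below are illustrative, not part of the package):

    // Hypothetical caller; `client` is an AIClient instance as shown in this diff.
    async function generate(client, prompt) {
      try {
        const result = await client.sendMessage(prompt, { maxTokens: 1024, temperature: 0.7 });
        return result.content;
      } catch (err) {
        // e.g. "Anthropic model '<model>' returned no content."
        console.error(`AI request failed: ${err.message}`);
        return null;
      }
    }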
@@ -113,9 +124,9 @@ class AIClient {
    * OpenAI GPT request
    */
   async openaiRequest(prompt, maxTokens, temperature) {
-    //
-    //
-    const
+    // Newer OpenAI models use max_completion_tokens instead of max_tokens
+    // This includes: o-series (o1, o3), gpt-5 series, and potentially others
+    const usesNewParameters = /^(o1|o3|gpt-5)/i.test(this.model);
 
     const requestParams = {
       model: this.model,
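
`usesNewParameters` is a case-insensitive prefix match on the configured model name, so parameter routing depends only on how the model is named. A quick sketch of how the regex from this hunk classifies a few illustrative model IDs (the IDs are examples, not values taken from the package):

    const usesNewParameters = (model) => /^(o1|o3|gpt-5)/i.test(model);

    usesNewParameters('o1-mini');   // true  -> max_completion_tokens, no temperature
    usesNewParameters('GPT-5');     // true  (case-insensitive)
    usesNewParameters('gpt-4o');    // false -> max_tokens + temperature
    usesNewParameters('gpt-4.1');   // false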
@@ -127,27 +138,56 @@ class AIClient {
       ]
     };
 
-    //
-    if (
+    // Newer models use different parameter names
+    if (usesNewParameters) {
       requestParams.max_completion_tokens = maxTokens;
-      //
+      // Newer reasoning models don't support temperature parameter
     } else {
       requestParams.max_tokens = maxTokens;
       requestParams.temperature = temperature;
     }
 
-
+    try {
+      const response = await this.client.chat.completions.create(requestParams);
 
-
-
-
-
-
-
-
-
+      return {
+        content: response.choices[0].message.content,
+        model: this.model,
+        provider: 'openai',
+        usage: {
+          promptTokens: response.usage.prompt_tokens,
+          completionTokens: response.usage.completion_tokens,
+          totalTokens: response.usage.total_tokens
+        }
+      };
+    } catch (error) {
+      // If we get a 400 error about max_tokens not being supported,
+      // automatically retry with max_completion_tokens
+      if (error.status === 400 && error.message.includes('max_tokens')) {
+        const retryParams = {
+          model: this.model,
+          messages: requestParams.messages,
+          max_completion_tokens: maxTokens
+          // Don't include temperature for new parameter format models
+        };
+
+        const response = await this.client.chat.completions.create(retryParams);
+
+        return {
+          content: response.choices[0].message.content,
+          model: this.model,
+          provider: 'openai',
+          usage: {
+            promptTokens: response.usage.prompt_tokens,
+            completionTokens: response.usage.completion_tokens,
+            totalTokens: response.usage.total_tokens
+          }
+        };
       }
-
+
+      // Re-throw other errors
+      throw error;
+    }
   }
 
   /**
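
The catch branch is a safety net for models the prefix regex misses: if the API rejects `max_tokens` with a 400, the same messages are resent with `max_completion_tokens` and without `temperature`. A condensed, standalone sketch of that fallback pattern against the OpenAI SDK (the `createWithFallback` helper is illustrative, not package code):

    // Try legacy params first; retry once with the newer parameter on a 400 about max_tokens.
    async function createWithFallback(client, model, messages, maxTokens, temperature) {
      try {
        return await client.chat.completions.create({
          model, messages, max_tokens: maxTokens, temperature
        });
      } catch (error) {
        if (error.status === 400 && error.message.includes('max_tokens')) {
          return await client.chat.completions.create({
            model, messages, max_completion_tokens: maxTokens
          });
        }
        throw error; // anything else is a genuine failure
      }
    }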
@@ -165,8 +205,20 @@ class AIClient {
     const result = await model.generateContent(prompt);
     const response = result.response;
 
+    // Validate response
+    let content;
+    try {
+      content = response.text();
+    } catch (error) {
+      throw new Error(`Google Gemini model '${this.model}' failed to generate text: ${error.message}`);
+    }
+
+    if (!content || content.trim().length === 0) {
+      throw new Error(`Google Gemini model '${this.model}' returned empty content.`);
+    }
+
     return {
-      content
+      content,
       model: this.model,
       provider: 'google',
       usage: {
@@ -181,28 +233,85 @@ class AIClient {
    * OpenRouter request (uses OpenAI-compatible API)
    */
   async openrouterRequest(prompt, maxTokens, temperature) {
-
+    // Newer OpenAI models on OpenRouter may use max_completion_tokens
+    // This includes: o-series (o1, o3), gpt-5 series
+    const usesNewParameters = /^openai\/(o1|o3|gpt-5)/i.test(this.model);
+
+    const requestParams = {
       model: this.model,
-      max_tokens: maxTokens,
-      temperature,
       messages: [
         {
           role: 'user',
           content: prompt
         }
       ]
-    }
+    };
 
-
-
-
-
-
-
-
-
+    // Newer models use different parameter names
+    if (usesNewParameters) {
+      requestParams.max_completion_tokens = maxTokens;
+      // Newer reasoning models don't support temperature parameter
+    } else {
+      requestParams.max_tokens = maxTokens;
+      requestParams.temperature = temperature;
+    }
+
+    try {
+      const response = await this.client.chat.completions.create(requestParams);
+
+      // Validate response structure
+      if (!response.choices || response.choices.length === 0) {
+        throw new Error(`OpenRouter model '${this.model}' returned no choices. The model may not exist or be unavailable.`);
       }
-
+
+      if (!response.choices[0].message) {
+        throw new Error(`OpenRouter model '${this.model}' returned invalid response structure.`);
+      }
+
+      const content = response.choices[0].message.content;
+
+      if (!content || content.trim().length === 0) {
+        throw new Error(`OpenRouter model '${this.model}' returned empty content. The model may not support this request format.`);
+      }
+
+      return {
+        content,
+        model: this.model,
+        provider: 'openrouter',
+        usage: {
+          promptTokens: response.usage?.prompt_tokens || 0,
+          completionTokens: response.usage?.completion_tokens || 0,
+          totalTokens: response.usage?.total_tokens || 0
+        }
+      };
+    } catch (error) {
+      // If we get a 400 error about max_tokens not being supported,
+      // automatically retry with max_completion_tokens
+      if (error.status === 400 && error.message.includes('max_tokens')) {
+        const retryParams = {
+          model: this.model,
+          messages: requestParams.messages,
+          max_completion_tokens: maxTokens
+          // Don't include temperature for new parameter format models
+        };
+
+        const response = await this.client.chat.completions.create(retryParams);
+
+        return {
+          content: response.choices[0].message.content,
+          model: this.model,
+          provider: 'openrouter',
+          usage: {
+            promptTokens: response.usage?.prompt_tokens || 0,
+            completionTokens: response.usage?.completion_tokens || 0,
+            totalTokens: response.usage?.total_tokens || 0
+          }
+        };
+      }
+
+      // Re-throw other errors
+      throw error;
+    }
   }
 
   /**
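
Unlike the native OpenAI path, the OpenRouter branch reads usage through optional chaining and falls back to 0, presumably because some providers routed through OpenRouter omit usage data. A small sketch of why that matters (the response object below is a fabricated example, not real API output):

    // A response with no `usage` field still yields a well-formed usage object.
    const response = { choices: [{ message: { content: 'ok' } }] };
    const usage = {
      promptTokens: response.usage?.prompt_tokens || 0,      // 0
      completionTokens: response.usage?.completion_tokens || 0,
      totalTokens: response.usage?.total_tokens || 0
    };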
@@ -211,21 +320,29 @@ class AIClient {
   async test() {
     const testPrompt = 'Respond with exactly: "Connection successful"';
 
-
-
-
-
+    try {
+      const response = await this.sendMessage(testPrompt, {
+        maxTokens: 50,
+        temperature: 0
+      });
 
-
-
-
+      if (!response.content) {
+        throw new Error(`Model '${this.model}' on ${this.provider} returned no content. The model may not exist or be unavailable.`);
+      }
 
-
-
-
-
-
-
+      return {
+        success: true,
+        provider: this.provider,
+        model: this.model,
+        response: response.content
+      };
+    } catch (error) {
+      // Add context to the error message
+      if (error.message.includes('returned no content') || error.message.includes('returned no choices')) {
+        throw error; // Already has good context
+      }
+      throw new Error(`Test failed for ${this.provider} model '${this.model}': ${error.message}`);
+    }
   }
 
   /**
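
`test()` now wraps the probe request in try/catch, lets the already-descriptive "returned no content/choices" errors pass through untouched, and prefixes everything else with provider and model. A brief usage sketch, assuming an AIClient instance constructed elsewhere in the CLI (the `runConnectionTest` helper is hypothetical):

    async function runConnectionTest(client) {
      try {
        const result = await client.test();
        console.log(`${result.provider}/${result.model}: ${result.response}`);
      } catch (error) {
        // Either the pass-through "returned no content/choices" error, or
        // "Test failed for <provider> model '<model>': <original message>".
        console.error(error.message);
        process.exitCode = 1;
      }
    }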