veogent 1.0.22 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/README.md +54 -4
  2. package/index.js +287 -7
  3. package/package.json +1 -1
  4. package/skills/SKILL.md +833 -117
package/README.md CHANGED
@@ -69,8 +69,13 @@ veogent create-project -n "Cyberpunk T-Rex" -k "T-rex, Neon, Sci-fi" -d "A massi
69
69
  # Get all chapters within a project ID
70
70
  veogent chapters <projectId>
71
71
 
72
- # View characters cast for the project
72
+ # View characters cast for the project (includes readiness info)
73
73
  veogent characters <projectId>
74
+ # Response includes: { characters: [{id, name, imageUri, ready}], characterReadiness: {total, ready, allReady} }
75
+
76
+ # Check scene materialization status (how many scenes are ready)
77
+ veogent scene-materialization-status -p <projectId> -c <chapterId>
78
+ # Response: { expectedScenes, materializedScenes, status: "PROCESSING"|"READY"|"EMPTY" }
74
79
 
75
80
  # Create scenes automatically using AI-generated narrative scripts
76
81
  veogent create-scene -p <projectId> -c <chapterId> --flowkey -C "The T-Rex looks up at the sky." "A meteor shower begins."
@@ -91,8 +96,12 @@ Queue generation jobs directly from the terminal.
91
96
  *Note: VEOGENT uses strict validation depending on the request type.*
92
97
 
93
98
  ```bash
99
+ # List supported models
100
+ veogent image-models # → { models: ["imagen3.5"] }
101
+ veogent video-models # → { models: ["veo_3_1_fast", "veo_3_1_fast_r2v"] }
102
+
94
103
  # Generate Image (Supports: imagen3.5)
95
- veogent request -t "GENERATE_IMAGES" -p <proj> -c <chap> -s <scene> -i "imagen4"
104
+ veogent request -t "GENERATE_IMAGES" -p <proj> -c <chap> -s <scene> -i "imagen3.5"
96
105
 
97
106
  # Generate Video (Supports: veo_3_1_fast, veo_3_1_fast_r2v)
98
107
  # Default/recommended model: veo_3_1_fast
@@ -105,6 +114,43 @@ veogent request -t "VIDEO_UPSCALE" -p <proj> -c <chap> -s <scene> -o "HORIZONTAL
105
114
  veogent upscale -p <proj> -c <chap> -s <scene> -o "VERTICAL" -r "VIDEO_RESOLUTION_4K"
106
115
  ```
107
116
 
117
+ ### 📊 Monitoring & Status
118
+ ```bash
119
+ # View all requests (most recent first)
120
+ veogent requests
121
+
122
+ # Get N most recent requests
123
+ veogent requests -n 10
124
+
125
+ # Filter by project / chapter / status
126
+ veogent requests -p <projectId> -c <chapterId> -n 5
127
+ veogent requests -s FAILED -n 20
128
+ veogent requests -s COMPLETED -p <projectId>
129
+
130
+ # Scene-level status with embedded asset URLs (image + video)
131
+ veogent scene-status -p <projectId> -c <chapterId>
132
+ # Each scene returns: { sceneId, image: { status, url }, video: { status, url } }
133
+
134
+ # Full workflow snapshot (scenes + requests + assets)
135
+ veogent workflow-status -p <projectId> -c <chapterId>
136
+
137
+ # Wait for all images to finish processing
138
+ veogent wait-images -p <projectId> -c <chapterId>
139
+
140
+ # Wait and verify all images succeeded (not just finished)
141
+ veogent wait-images -p <projectId> -c <chapterId> --require-success
142
+
143
+ # Same for videos
144
+ veogent wait-videos -p <projectId> -c <chapterId> --require-success
145
+
146
+ # Queue concurrency status
147
+ veogent queue-status
148
+
149
+ # Google Flow credit/plan info (requires flow key)
150
+ veogent flow-credits
151
+ veogent flow-credits -f "ya29.a0ATk..."
152
+ ```
153
+
108
154
  ---
109
155
 
110
156
  ## 🤖 For AI Agents
@@ -126,8 +172,9 @@ Veogent CLI ships with a comprehensive **[`skills/SKILL.md`](./skills/SKILL.md)*
126
172
  6. Create scenes from returned script list:
127
173
  - `veogent create-scene -p <projectId> -c <chapterId> -C "scene 1" "scene 2" ... --flowkey`
128
174
  7. Wait for character generation completion (`imageUri` required for all characters):
129
- - `veogent characters <projectId>`
130
- - If missing/fail: inspect `veogent requests`, recover via `veogent edit-character`.
175
+ - `veogent characters <projectId>` — check `characterReadiness.allReady === true`
176
+ - `veogent scene-materialization-status -p <projectId> -c <chapterId>` — verify `status: "READY"`
177
+ - If missing/fail: inspect `veogent requests -n 20`, recover via `veogent edit-character`.
131
178
  8. Generate scene images:
132
179
  - `veogent request -t "GENERATE_IMAGES" ...`
133
180
  - If image is reported wrong, decode image to Base64 and send to AI reviewer for evaluation.
@@ -140,6 +187,9 @@ Veogent CLI ships with a comprehensive **[`skills/SKILL.md`](./skills/SKILL.md)*
140
187
  > 📖 For the full detailed guide with all commands, options tables, and examples, see **[`skills/SKILL.md`](./skills/SKILL.md)**.
141
188
 
142
189
  **Important:** `veogent requests` is the primary status board for image/video/edit workflows.
190
+ - Use `-n <N>` to get only the N most recent requests.
191
+ - Use `-s FAILED` / `-s COMPLETED` to filter by status.
192
+ - Use `--require-success` on `wait-images` / `wait-videos` to ensure assets actually exist (not just "finished").
143
193
 
144
194
  **Concurrency:** maximum **5** requests can be processed simultaneously. If the API reports maximum limit reached, treat it as queue-full (wait/retry), not a hard failure.
145
195
 
package/index.js CHANGED
@@ -6,7 +6,7 @@ import { setConfig, clearConfig, getToken } from './config.js';
6
6
 
7
7
  const program = new Command();
8
8
 
9
- const IMAGE_MODELS = ['imagen3.5', 'imagen4'];
9
+ const IMAGE_MODELS = ['imagen3.5'];
10
10
  const VIDEO_MODELS = ['veo_3_1_fast', 'veo_3_1_fast_r2v'];
11
11
 
12
12
  function globalOpts() {
@@ -696,7 +696,7 @@ program
696
696
  .requiredOption('-c, --chapter <chapter>', 'Chapter ID')
697
697
  .requiredOption('-s, --scene <scene>', 'Scene ID')
698
698
  .option('-o, --orientation <orientation>', 'Request orientation (HORIZONTAL, VERTICAL)')
699
- .option('-i, --imagemodel <imagemodel>', 'Image Model (imagen4, imagen3.5)', 'imagen4')
699
+ .option('-i, --imagemodel <imagemodel>', 'Image Model (imagen3.5)', 'imagen3.5')
700
700
  .option('-v, --videomodel <videomodel>', 'Video Model (veo_3_1_fast, veo_3_1_fast_r2v). Default: veo_3_1_fast', 'veo_3_1_fast')
701
701
  .option('-S, --speed <speed>', 'Video Speed (normal, timelapse, slowmotion)', 'normal')
702
702
  .option('-E, --endscene <endscene>', 'End Scene ID for continuous video generation')
@@ -744,7 +744,11 @@ program
744
744
  }
745
745
 
746
746
  const data = await api.post('/app/request', payload);
747
- const requestResult = unwrapData(data);
747
+ let requestResult = unwrapData(data);
748
+ // Ensure requestResult is an object (backend may return boolean/primitive)
749
+ if (typeof requestResult !== 'object' || requestResult === null) {
750
+ requestResult = { raw: requestResult };
751
+ }
748
752
  // P2-7: Persist endScene metadata when chained video generation
749
753
  if (options.endscene) {
750
754
  requestResult.end_scene_id = options.endscene;
@@ -758,13 +762,45 @@ program
758
762
 
759
763
  program
760
764
  .command('requests')
761
- .description('Get all generation requests/jobs status for the current user')
762
- .action(async () => {
765
+ .description('Get generation requests/jobs status for the current user')
766
+ .option('-n, --limit <n>', 'Return only the N most recent requests', null)
767
+ .option('-p, --project <projectId>', 'Filter by project ID')
768
+ .option('-c, --chapter <chapterId>', 'Filter by chapter ID')
769
+ .option('-s, --status <status>', 'Filter by status (e.g. COMPLETED, FAILED, PROCESSING)')
770
+ .action(async (options) => {
763
771
  try {
764
772
  const data = await api.get('/app/requests');
765
- console.log(JSON.stringify(unwrapData(data), null, 2));
773
+ let items = unwrapData(data);
774
+ items = Array.isArray(items) ? items : (items?.items || []);
775
+
776
+ // Filter by project
777
+ if (options.project) {
778
+ items = items.filter((r) => r?.projectId === options.project || r?.project_id === options.project);
779
+ }
780
+ // Filter by chapter
781
+ if (options.chapter) {
782
+ items = items.filter((r) => r?.chapterId === options.chapter || r?.chapter_id === options.chapter);
783
+ }
784
+ // Filter by status
785
+ if (options.status) {
786
+ const s = options.status.toUpperCase();
787
+ items = items.filter((r) => String(r?.status || '').toUpperCase() === s);
788
+ }
789
+ // Sort by createdAt desc (most recent first)
790
+ items = items.sort((a, b) => {
791
+ const ta = a?.createdAt || a?.created_at || 0;
792
+ const tb = b?.createdAt || b?.created_at || 0;
793
+ return (Number(tb) || 0) - (Number(ta) || 0);
794
+ });
795
+ // Limit to N most recent
796
+ if (options.limit !== null && options.limit !== undefined) {
797
+ const n = parseInt(options.limit, 10);
798
+ if (!isNaN(n) && n > 0) items = items.slice(0, n);
799
+ }
800
+
801
+ emitJson({ status: 'success', total: items.length, data: items });
766
802
  } catch (error) {
767
- console.log(JSON.stringify({ status: "error", ...formatCliError(error) }));
803
+ emitJson({ status: 'error', ...formatCliError(error) });
768
804
  }
769
805
  });
770
806
 
@@ -1014,6 +1050,184 @@ program
1014
1050
  }
1015
1051
  });
1016
1052
 
1053
+ // --- Standalone Generate (Image / Video from text) ---
1054
+ program
1055
+ .command('gen-image')
1056
+ .description('Generate an image from a text prompt (standalone, no project needed). Costs 3 credits. Uses Imagen 3.5.')
1057
+ .requiredOption('-p, --prompt <prompt>', 'Text prompt to generate image from (10-2000 chars)')
1058
+ .option('-n, --negative <negative>', 'Negative prompt — things to avoid')
1059
+ .option('-o, --orientation <orientation>', 'Orientation: HORIZONTAL or VERTICAL', 'HORIZONTAL')
1060
+ .option('-w, --wait', 'Wait for completion (poll until done)', false)
1061
+ .option('-i, --interval <sec>', 'Polling interval in seconds (with --wait)', '5')
1062
+ .option('-t, --timeout <sec>', 'Timeout in seconds (with --wait)', '300')
1063
+ .action(async (options) => {
1064
+ try {
1065
+ const payload = {
1066
+ prompt: options.prompt,
1067
+ orientation: options.orientation.toUpperCase(),
1068
+ };
1069
+ if (options.negative) payload.negativePrompt = options.negative;
1070
+
1071
+ humanLog('🎨 Submitting standalone image generation request...');
1072
+ const data = await api.post('/app/standalone/generate-image', payload);
1073
+ const result = unwrapData(data);
1074
+ const requestId = result?.requestId || result?.request_id;
1075
+
1076
+ if (!requestId) {
1077
+ emitJson({ status: 'error', message: 'No requestId returned', data: result });
1078
+ return;
1079
+ }
1080
+
1081
+ humanLog(`✅ Request created: ${requestId}`);
1082
+
1083
+ if (!options.wait) {
1084
+ emitJson({ status: 'success', requestId, message: 'Request submitted. Use --wait to poll for completion, or check with: veogent standalone-request ' + requestId });
1085
+ return;
1086
+ }
1087
+
1088
+ // Poll for completion
1089
+ const intervalMs = Math.max(1, Number(options.interval || 5)) * 1000;
1090
+ const timeoutMs = Math.max(10, Number(options.timeout || 300)) * 1000;
1091
+ const startedAt = Date.now();
1092
+
1093
+ while (Date.now() - startedAt < timeoutMs) {
1094
+ await new Promise((resolve) => setTimeout(resolve, intervalMs));
1095
+ try {
1096
+ const statusData = await api.get(`/app/standalone/request/${requestId}`);
1097
+ const req = unwrapData(statusData);
1098
+ const status = String(req?.status || '').toUpperCase();
1099
+ humanLog(`⏳ Status: ${status}`);
1100
+
1101
+ if (status === 'COMPLETED') {
1102
+ emitJson({ status: 'success', requestId, imageUri: req?.outputData?.imageUri || null, data: req });
1103
+ return;
1104
+ }
1105
+ if (status === 'FAILED') {
1106
+ emitJson({ status: 'error', requestId, message: req?.message || 'Generation failed', data: req });
1107
+ process.exit(1);
1108
+ }
1109
+ } catch (pollErr) {
1110
+ humanError(`⚠️ Poll error: ${pollErr.message}`);
1111
+ }
1112
+ }
1113
+
1114
+ emitJson({ status: 'error', code: 'WAIT_TIMEOUT', requestId, message: `Timeout after ${options.timeout}s` });
1115
+ process.exit(1);
1116
+ } catch (error) {
1117
+ emitJson({ status: 'error', ...formatCliError(error) });
1118
+ process.exit(1);
1119
+ }
1120
+ });
1121
+
1122
+ program
1123
+ .command('gen-video')
1124
+ .description('Generate a video from a text prompt (standalone, no project needed). Costs 5 credits. Uses Veo 3.1 Fast.')
1125
+ .requiredOption('-p, --prompt <prompt>', 'Text prompt to generate video from (10-2000 chars)')
1126
+ .option('-n, --negative <negative>', 'Negative prompt — things to avoid')
1127
+ .option('-o, --orientation <orientation>', 'Orientation: HORIZONTAL or VERTICAL', 'HORIZONTAL')
1128
+ .option('-r, --reference <imageUri>', 'Reference image URI for image-to-video generation')
1129
+ .option('-w, --wait', 'Wait for completion (poll until done)', false)
1130
+ .option('-i, --interval <sec>', 'Polling interval in seconds (with --wait)', '10')
1131
+ .option('-t, --timeout <sec>', 'Timeout in seconds (with --wait)', '600')
1132
+ .action(async (options) => {
1133
+ try {
1134
+ const payload = {
1135
+ prompt: options.prompt,
1136
+ orientation: options.orientation.toUpperCase(),
1137
+ };
1138
+ if (options.negative) payload.negativePrompt = options.negative;
1139
+ if (options.reference) payload.referenceImageUri = options.reference;
1140
+
1141
+ humanLog('🎬 Submitting standalone video generation request...');
1142
+ const data = await api.post('/app/standalone/generate-video', payload);
1143
+ const result = unwrapData(data);
1144
+ const requestId = result?.requestId || result?.request_id;
1145
+
1146
+ if (!requestId) {
1147
+ emitJson({ status: 'error', message: 'No requestId returned', data: result });
1148
+ return;
1149
+ }
1150
+
1151
+ humanLog(`✅ Request created: ${requestId}`);
1152
+
1153
+ if (!options.wait) {
1154
+ emitJson({ status: 'success', requestId, message: 'Request submitted. Use --wait to poll for completion, or check with: veogent standalone-request ' + requestId });
1155
+ return;
1156
+ }
1157
+
1158
+ // Poll for completion
1159
+ const intervalMs = Math.max(1, Number(options.interval || 10)) * 1000;
1160
+ const timeoutMs = Math.max(30, Number(options.timeout || 600)) * 1000;
1161
+ const startedAt = Date.now();
1162
+
1163
+ while (Date.now() - startedAt < timeoutMs) {
1164
+ await new Promise((resolve) => setTimeout(resolve, intervalMs));
1165
+ try {
1166
+ const statusData = await api.get(`/app/standalone/request/${requestId}`);
1167
+ const req = unwrapData(statusData);
1168
+ const status = String(req?.status || '').toUpperCase();
1169
+ humanLog(`⏳ Status: ${status}`);
1170
+
1171
+ if (status === 'COMPLETED') {
1172
+ emitJson({ status: 'success', requestId, videoUri: req?.outputData?.videoUri || null, data: req });
1173
+ return;
1174
+ }
1175
+ if (status === 'FAILED') {
1176
+ emitJson({ status: 'error', requestId, message: req?.message || 'Generation failed', data: req });
1177
+ process.exit(1);
1178
+ }
1179
+ } catch (pollErr) {
1180
+ humanError(`⚠️ Poll error: ${pollErr.message}`);
1181
+ }
1182
+ }
1183
+
1184
+ emitJson({ status: 'error', code: 'WAIT_TIMEOUT', requestId, message: `Timeout after ${options.timeout}s` });
1185
+ process.exit(1);
1186
+ } catch (error) {
1187
+ emitJson({ status: 'error', ...formatCliError(error) });
1188
+ process.exit(1);
1189
+ }
1190
+ });
1191
+
1192
+ program
1193
+ .command('standalone-requests')
1194
+ .description('List your standalone image/video generation requests')
1195
+ .option('-n, --limit <n>', 'Return only the N most recent', null)
1196
+ .option('-s, --status <status>', 'Filter by status (PENDING, PROCESSING, COMPLETED, FAILED)')
1197
+ .action(async (options) => {
1198
+ try {
1199
+ const data = await api.get('/app/standalone/requests');
1200
+ let items = unwrapData(data);
1201
+ items = Array.isArray(items) ? items : (items?.items || []);
1202
+
1203
+ if (options.status) {
1204
+ const s = options.status.toUpperCase();
1205
+ items = items.filter((r) => String(r?.status || '').toUpperCase() === s);
1206
+ }
1207
+
1208
+ if (options.limit !== null && options.limit !== undefined) {
1209
+ const n = parseInt(options.limit, 10);
1210
+ if (!isNaN(n) && n > 0) items = items.slice(0, n);
1211
+ }
1212
+
1213
+ emitJson({ status: 'success', total: items.length, data: items });
1214
+ } catch (error) {
1215
+ emitJson({ status: 'error', ...formatCliError(error) });
1216
+ }
1217
+ });
1218
+
1219
+ program
1220
+ .command('standalone-request <requestId>')
1221
+ .description('Get details of a specific standalone request')
1222
+ .action(async (requestId) => {
1223
+ try {
1224
+ const data = await api.get(`/app/standalone/request/${requestId}`);
1225
+ emitJson({ status: 'success', data: unwrapData(data) });
1226
+ } catch (error) {
1227
+ emitJson({ status: 'error', ...formatCliError(error) });
1228
+ }
1229
+ });
1230
+
1017
1231
  // --- System ---
1018
1232
  program
1019
1233
  .command('skill')
@@ -1070,4 +1284,70 @@ program
1070
1284
  }
1071
1285
  });
1072
1286
 
1287
+ // Flow Credits — fetch plan and credit info from Google AI Sandbox using flow key
1288
+ program
1289
+ .command('flow-credits')
1290
+ .description('Fetch plan and credit info from Google AI Sandbox using your Flow key (Bearer token)')
1291
+ .option('-f, --flowkey <flowkey>', 'Flow key (ya29. token). If omitted, uses stored flow key from account.')
1292
+ .action(async (options) => {
1293
+ try {
1294
+ // Determine which flow key to use
1295
+ let flowKey = options.flowkey;
1296
+
1297
+ if (!flowKey) {
1298
+ // Try to get from stored account
1299
+ const token = getToken();
1300
+ if (!token) {
1301
+ emitJson({ status: 'error', code: 'NO_TOKEN', message: 'Not logged in. Run: veogent login' });
1302
+ process.exit(1);
1303
+ }
1304
+ const accountData = unwrapData(await api.get('/app/flow-key'));
1305
+ flowKey = accountData?.flowKey;
1306
+ if (!flowKey) {
1307
+ emitJson({ status: 'error', code: 'NO_FLOW_KEY', message: 'No flow key found. Set one with: veogent setup-flow -f <token>' });
1308
+ process.exit(1);
1309
+ }
1310
+ }
1311
+
1312
+ const response = await fetch('https://aisandbox-pa.googleapis.com/v1/credits', {
1313
+ method: 'GET',
1314
+ headers: {
1315
+ 'accept': '*/*',
1316
+ 'accept-language': 'en-US,en;q=0.9',
1317
+ 'authorization': `Bearer ${flowKey}`,
1318
+ 'content-type': 'application/json',
1319
+ 'origin': 'https://veogent.com',
1320
+ 'referer': 'https://veogent.com/',
1321
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/145.0.0.0 Safari/537.36',
1322
+ 'x-browser-channel': 'stable',
1323
+ 'x-browser-year': '2026',
1324
+ },
1325
+ });
1326
+
1327
+ if (!response.ok) {
1328
+ const errText = await response.text();
1329
+ emitJson({ status: 'error', code: `HTTP_${response.status}`, message: errText || response.statusText });
1330
+ process.exit(1);
1331
+ }
1332
+
1333
+ const data = await response.json();
1334
+
1335
+ // Normalize output
1336
+ const result = {
1337
+ status: 'success',
1338
+ plan: data?.plan || data?.tier || data?.subscriptionTier || null,
1339
+ credits: data?.credits ?? data?.remainingCredits ?? data?.balance ?? null,
1340
+ totalCredits: data?.totalCredits ?? data?.maxCredits ?? null,
1341
+ usedCredits: data?.usedCredits ?? null,
1342
+ resetAt: data?.resetAt ?? data?.renewalDate ?? null,
1343
+ raw: data,
1344
+ };
1345
+
1346
+ emitJson(result);
1347
+ } catch (error) {
1348
+ emitJson({ status: 'error', ...formatCliError(error) });
1349
+ process.exit(1);
1350
+ }
1351
+ });
1352
+
1073
1353
  program.parse(process.argv);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "veogent",
3
- "version": "1.0.22",
3
+ "version": "1.1.0",
4
4
  "description": "The official CLI to interact with the VEOGENT API - AI Video and Image generation platform",
5
5
  "main": "index.js",
6
6
  "bin": {