seacloud-sdk 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -144,36 +144,379 @@ var SeacloudClient = class {
     return { ...this.config };
   }
 };
+
+// src/core/global-config.ts
+var globalConfig = {
+  client: null,
+  defaultPollingOptions: {
+    intervalMs: 3e3,
+    // 3-second polling interval
+    maxAttempts: 100
+    // up to 100 attempts (~5 minutes)
+  }
+};
+function initSeacloud(apiKey, options) {
+  if (!apiKey) {
+    throw new Error("API key is required. Please provide a valid API key.");
+  }
+  globalConfig.client = new SeacloudClient({
+    apiKey,
+    baseUrl: options?.baseUrl,
+    timeout: options?.timeout
+  });
+  if (options?.intervalMs !== void 0) {
+    globalConfig.defaultPollingOptions.intervalMs = options.intervalMs;
+  }
+  if (options?.maxAttempts !== void 0) {
+    globalConfig.defaultPollingOptions.maxAttempts = options.maxAttempts;
+  }
+  return globalConfig.client;
+}
+function getClient() {
+  if (!globalConfig.client) {
+    throw new Error(
+      "SeaCloud SDK is not initialized. Please call initSeacloud(apiKey) first."
+    );
+  }
+  return globalConfig.client;
+}
+
+// src/api/llm_chat_completions.ts
+async function llmChatCompletions(params) {
+  const client = getClient();
+  const config = client.getConfig();
+  const url = `${config.baseUrl}/llm/chat/completions`;
+  const controller = new AbortController();
+  const timeoutId = setTimeout(() => controller.abort(), config.timeout);
+  try {
+    const response = await config.fetch(url, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${config.apiKey}`
+      },
+      body: JSON.stringify(params),
+      signal: controller.signal
+    });
+    clearTimeout(timeoutId);
+    if (!response.ok) {
+      const errorBody = await response.text();
+      throw new SeacloudError(
+        `HTTP ${response.status}: ${errorBody}`,
+        response.status,
+        errorBody
+      );
+    }
+    if (params.stream) {
+      return parseStreamingResponse(response);
+    }
+    const result = await response.json();
+    return result;
+  } catch (error) {
+    clearTimeout(timeoutId);
+    if (error instanceof SeacloudError) {
+      throw error;
+    }
+    if (error.name === "AbortError") {
+      throw new SeacloudError(`Request timeout after ${config.timeout}ms`);
+    }
+    throw new SeacloudError(
+      `Request failed: ${error.message}`,
+      void 0,
+      error
+    );
+  }
+}
+async function* parseStreamingResponse(response) {
+  const reader = response.body?.getReader();
+  if (!reader) {
+    throw new SeacloudError("Response body is not readable");
+  }
+  const decoder = new TextDecoder();
+  let buffer = "";
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) {
+        break;
+      }
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        const trimmedLine = line.trim();
+        if (!trimmedLine) continue;
+        if (trimmedLine === "data: [DONE]") continue;
+        if (trimmedLine.startsWith("data: ")) {
+          const jsonStr = trimmedLine.slice(6);
+          try {
+            const chunk = JSON.parse(jsonStr);
+            yield chunk;
+          } catch (error) {
+            console.warn("Failed to parse SSE chunk:", jsonStr);
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+}
+
+// src/api/agent_chat_completions.ts
+async function agentChatCompletions(params) {
+  const client = getClient();
+  const config = client.getConfig();
+  const url = `${config.baseUrl}/agent/api/v1/chat/completions`;
+  const model = params.model || "custom_openai/vertex-ai-claude-sonnet-4.5";
+  const userWantsStreaming = params.stream !== false;
+  const requestBody = {
+    ...params,
+    model,
+    stream: true
+    // Always request SSE from API
+  };
+  const controller = new AbortController();
+  const timeoutId = setTimeout(() => controller.abort(), config.timeout);
+  try {
+    const response = await config.fetch(url, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${config.apiKey}`,
+        "X-Project": "SeaArt"
+        // Required header for agent API
+      },
+      body: JSON.stringify(requestBody),
+      signal: controller.signal
+    });
+    clearTimeout(timeoutId);
+    if (!response.ok) {
+      const errorBody = await response.text();
+      throw new SeacloudError(
+        `HTTP ${response.status}: ${errorBody}`,
+        response.status,
+        errorBody
+      );
+    }
+    if (userWantsStreaming) {
+      return parseAgentStreamingResponse(response);
+    }
+    return await parseAgentNonStreamingResponse(response);
+  } catch (error) {
+    clearTimeout(timeoutId);
+    if (error instanceof SeacloudError) {
+      throw error;
+    }
+    if (error.name === "AbortError") {
+      throw new SeacloudError(`Request timeout after ${config.timeout}ms`);
+    }
+    throw new SeacloudError(
+      `Request failed: ${error.message}`,
+      void 0,
+      error
+    );
+  }
+}
+async function parseAgentNonStreamingResponse(response) {
+  const reader = response.body?.getReader();
+  if (!reader) {
+    throw new SeacloudError("Response body is not readable");
+  }
+  const decoder = new TextDecoder();
+  let buffer = "";
+  let fullContent = "";
+  let artifacts = [];
+  let finishReason = "";
+  let sessionId = "";
+  let msgId = "";
+  let lastChunk = null;
+  let usage;
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) {
+        break;
+      }
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        const trimmedLine = line.trim();
+        if (!trimmedLine) continue;
+        if (trimmedLine === "data: [DONE]") continue;
+        if (trimmedLine === "event: heartbeat") continue;
+        if (trimmedLine.startsWith("data: ")) {
+          const data = trimmedLine.slice(6).trim();
+          try {
+            const parsed = JSON.parse(data);
+            lastChunk = parsed;
+            const delta = parsed.choices?.[0]?.delta;
+            if (delta?.content) {
+              if (typeof delta.content === "string") {
+                fullContent += delta.content;
+              } else if (Array.isArray(delta.content)) {
+                for (const item of delta.content) {
+                  if (item.type === "text" && item.text) {
+                    fullContent += item.text;
+                  }
+                }
+              }
+            }
+            if (delta?.artifacts) {
+              artifacts.push(...delta.artifacts);
+            }
+            if (parsed.choices?.[0]?.finish_reason) {
+              finishReason = parsed.choices[0].finish_reason;
+            }
+            if (parsed.session_id) {
+              sessionId = parsed.session_id;
+            }
+            if (parsed.msg_id) {
+              msgId = parsed.msg_id;
+            }
+            if (parsed.usage) {
+              usage = parsed.usage;
+            }
+          } catch (e) {
+            console.warn("Failed to parse SSE chunk:", data.substring(0, 100));
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+  if (!lastChunk) {
+    throw new SeacloudError("No valid response chunks received");
+  }
+  return {
+    id: lastChunk.id,
+    object: "chat.completion",
+    created: lastChunk.created,
+    model: lastChunk.model,
+    system_fingerprint: lastChunk.system_fingerprint,
+    choices: [
+      {
+        index: 0,
+        message: {
+          role: "assistant",
+          content: fullContent
+        },
+        finish_reason: finishReason || null
+      }
+    ],
+    usage,
+    session_id: sessionId || void 0,
+    msg_id: msgId || void 0,
+    artifacts: artifacts.length > 0 ? artifacts : void 0
+  };
+}
+async function* parseAgentStreamingResponse(response) {
+  const reader = response.body?.getReader();
+  if (!reader) {
+    throw new SeacloudError("Response body is not readable");
+  }
+  const decoder = new TextDecoder();
+  let buffer = "";
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) {
+        break;
+      }
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        const trimmedLine = line.trim();
+        if (!trimmedLine || trimmedLine.startsWith(":")) continue;
+        if (trimmedLine.startsWith("event: ")) {
+          const eventType = trimmedLine.slice(7).trim();
+          if (eventType === "heartbeat") {
+            continue;
+          }
+        }
+        if (trimmedLine.startsWith("data: ")) {
+          const data = trimmedLine.slice(6).trim();
+          if (data === "[DONE]") {
+            break;
+          }
+          try {
+            const chunk = JSON.parse(data);
+            yield chunk;
+          } catch (error) {
+            console.warn("Failed to parse SSE chunk:", data);
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+}
+function createTextMessage(role, text) {
+  return {
+    role,
+    content: [{ type: "text", text }]
+  };
+}
 var __filename$1 = fileURLToPath(import.meta.url);
 dirname(__filename$1);
 function showHelp() {
   console.log(`
-SeaCloud CLI - Test AI models from command line
+SeaCloud CLI - Test AI models, LLM, and Agent from command line
 
 Usage:
-  seacloud <model> [options]
+  seacloud <command> [options]
+
+Commands:
+  llm <prompt>        Chat with LLM models
+  agent <prompt>      Chat with Fast Agent (supports image/video generation)
+  <model>             Test specific model generation
 
-Options:
+LLM Options:
+  --model <name>      Model name (default: seaart-mix-sonnet-4-5)
+  --stream            Enable streaming mode
+  --temperature <n>   Temperature 0-2 (default: 0.7)
+  --max-tokens <n>    Max tokens to generate
+
+Agent Options:
+  --agent-id <id>     Agent ID (default: seagen_agent)
+  --model <name>      Model name (default: gpt-4o)
+  --stream            Enable streaming mode
+  --session-id <id>   Session ID for multi-turn conversation
+
+Model Generation Options:
   --api-key <key>     API key (or set API_SERVICE_TOKEN env var)
-  --base-url <url>    Base URL (or set API_BASE_URL env var)
+  --base-url <url>    Base URL (default: http://proxy.sg.seaverse.dev)
  --params <json>     JSON parameters for the model
 
 Examples:
-  # Test mureka song generator
-  seacloud mureka_song_generator --params '{"lyrics":"Happy birthday","style":"pop"}'
+  # Chat with LLM (non-streaming)
+  seacloud llm "What is the capital of France?"
+
+  # Chat with LLM (streaming)
+  seacloud llm "Tell me a story" --stream
+
+  # Chat with LLM using specific model
+  seacloud llm "Hello" --model deepseek-v3.1 --temperature 1.0
 
-  # Test minimax TTS
-  seacloud minimax_tts --params '{"text":"Hello world","voice_id":"male-qn-qingse"}'
+  # Chat with Agent (can generate images)
+  seacloud agent "Generate an image of a sunset"
 
-  # Test flux image generation
+  # Chat with Agent (streaming)
+  seacloud agent "Create a cat image" --stream
+
+  # Test model generation
   seacloud flux_1_1_pro --params '{"prompt":"a beautiful sunset"}'
 
-  # Use custom API key and base URL
-  seacloud flux_1_1_pro --api-key sa-xxx --base-url http://localhost:8080 --params '{"prompt":"test"}'
+  # Use custom API key
+  seacloud llm "Hello" --api-key sa-xxx
 
 Environment Variables:
-  API_SERVICE_TOKEN   API authentication token
-  API_BASE_URL        Base URL for API endpoints
+  API_SERVICE_TOKEN   API authentication token (required)
+  SEACLOUD_BASE_URL   Base URL for API endpoints
 `);
 }
 function parseArgs(args) {
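
The additions above give the SDK a small programmatic surface: initSeacloud(apiKey, options) constructs and caches a shared SeacloudClient, and llmChatCompletions POSTs to ${baseUrl}/llm/chat/completions, returning either a parsed JSON completion or, when params.stream is true, an async generator over SSE chunks. A minimal usage sketch, assuming these functions are exported from the package entry point (only dist/cli.js appears in this diff, so the import specifier is illustrative):

    import { initSeacloud, llmChatCompletions } from "seacloud-sdk";

    // Initialize once; later calls resolve the cached client via getClient().
    initSeacloud(process.env.API_SERVICE_TOKEN, { timeout: 12e4 });

    // Non-streaming: resolves to a chat.completion-style object.
    const reply = await llmChatCompletions({
      model: "seaart-mix-sonnet-4-5",
      messages: [{ role: "user", content: "What is the capital of France?" }],
      stream: false
    });
    console.log(reply.choices[0].message.content);

    // Streaming: stream: true switches the return value to an async generator.
    const stream = await llmChatCompletions({
      model: "seaart-mix-sonnet-4-5",
      messages: [{ role: "user", content: "Tell me a story" }],
      stream: true
    });
    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta?.content;
      if (typeof delta === "string") process.stdout.write(delta);
    }
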
@@ -277,20 +620,151 @@ async function testModel(model, options) {
     process.exit(1);
   }
 }
+async function runLlm(prompt, args) {
+  const options = { stream: false, model: "seaart-mix-sonnet-4-5", temperature: 0.7 };
+  for (let i = 0; i < args.length; i++) {
+    const arg = args[i];
+    if (arg === "--model") options.model = args[++i];
+    else if (arg === "--stream") options.stream = true;
+    else if (arg === "--temperature") options.temperature = parseFloat(args[++i]);
+    else if (arg === "--max-tokens") options.maxTokens = parseInt(args[++i]);
+    else if (arg === "--api-key") options.apiKey = args[++i];
+    else if (arg === "--base-url") options.baseUrl = args[++i];
+  }
+  const apiKey = options.apiKey || process.env.API_SERVICE_TOKEN;
+  const baseUrl = options.baseUrl || process.env.SEACLOUD_BASE_URL || "http://proxy.sg.seaverse.dev";
+  if (!apiKey) {
+    console.error("Error: API key not provided. Use --api-key or set API_SERVICE_TOKEN env var");
+    process.exit(1);
+  }
+  initSeacloud(apiKey, { baseUrl, timeout: 12e4 });
+  console.log(`Model: ${options.model}`);
+  console.log(`Prompt: ${prompt}
+`);
+  if (options.stream) {
+    const stream = await llmChatCompletions({
+      model: options.model,
+      messages: [{ role: "user", content: prompt }],
+      stream: true,
+      temperature: options.temperature,
+      max_tokens: options.maxTokens
+    });
+    process.stdout.write("Response: ");
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content;
+      if (typeof content === "string") process.stdout.write(content);
+    }
+    console.log("\n");
+  } else {
+    const response = await llmChatCompletions({
+      model: options.model,
+      messages: [{ role: "user", content: prompt }],
+      stream: false,
+      temperature: options.temperature,
+      max_tokens: options.maxTokens
+    });
+    console.log("Response:", response.choices[0].message.content);
+    console.log("\nUsage:", response.usage);
+  }
+}
+async function runAgent(prompt, args) {
+  const options = {
+    stream: false,
+    model: "gpt-4o",
+    agentId: "seagen_agent"
+  };
+  for (let i = 0; i < args.length; i++) {
+    const arg = args[i];
+    if (arg === "--model") options.model = args[++i];
+    else if (arg === "--stream") options.stream = true;
+    else if (arg === "--agent-id") options.agentId = args[++i];
+    else if (arg === "--session-id") options.sessionId = args[++i];
+    else if (arg === "--api-key") options.apiKey = args[++i];
+    else if (arg === "--base-url") options.baseUrl = args[++i];
+  }
+  const apiKey = options.apiKey || process.env.API_SERVICE_TOKEN;
+  const baseUrl = options.baseUrl || process.env.SEACLOUD_BASE_URL || "http://proxy.sg.seaverse.dev";
+  if (!apiKey) {
+    console.error("Error: API key not provided. Use --api-key or set API_SERVICE_TOKEN env var");
+    process.exit(1);
+  }
+  initSeacloud(apiKey, { baseUrl, timeout: 3e5 });
+  console.log(`Agent: ${options.agentId}`);
+  console.log(`Model: ${options.model}`);
+  console.log(`Prompt: ${prompt}
+`);
+  if (options.stream) {
+    const stream = await agentChatCompletions({
+      agent_id: options.agentId,
+      messages: [createTextMessage("user", prompt)],
+      model: options.model,
+      stream: true,
+      session_id: options.sessionId,
+      seq: 0
+    });
+    process.stdout.write("Response: ");
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content;
+      if (typeof content === "string") process.stdout.write(content);
+    }
+    console.log("\n");
+  } else {
+    const response = await agentChatCompletions({
+      agent_id: options.agentId,
+      messages: [createTextMessage("user", prompt)],
+      model: options.model,
+      stream: false,
+      session_id: options.sessionId,
+      seq: 0
+    });
+    console.log("Response:", response.choices[0].message.content);
+    if (response.artifacts && response.artifacts.length > 0) {
+      console.log("\nGenerated Artifacts:");
+      response.artifacts.forEach((artifact, i) => {
+        console.log(`  ${i + 1}. ${artifact.name}`);
+        console.log(`     URL: ${artifact.url}`);
+      });
+    }
+    console.log("\nSession ID:", response.session_id);
+    console.log("Message ID:", response.msg_id);
+  }
+}
 async function main() {
   const args = process.argv.slice(2);
   if (args.length === 0 || args[0] === "--help" || args[0] === "-h") {
     showHelp();
     process.exit(0);
   }
-  const model = args[0];
-  if (model.startsWith("--")) {
-    console.error("Error: model name required");
-    console.log("Usage: seacloud <model> --params <json>");
+  const command = args[0];
+  try {
+    if (command === "llm") {
+      if (args.length < 2) {
+        console.error("Error: prompt required for llm command");
+        console.log('Usage: seacloud llm "<prompt>" [options]');
+        process.exit(1);
+      }
+      await runLlm(args[1], args.slice(2));
+    } else if (command === "agent") {
+      if (args.length < 2) {
+        console.error("Error: prompt required for agent command");
+        console.log('Usage: seacloud agent "<prompt>" [options]');
+        process.exit(1);
+      }
+      await runAgent(args[1], args.slice(2));
+    } else {
+      const model = command;
+      if (model.startsWith("--")) {
+        console.error("Error: command or model name required");
+        console.log("Usage: seacloud <command> [options]");
+        process.exit(1);
+      }
+      const options = parseArgs(args.slice(1));
+      await testModel(model, options);
+    }
+  } catch (error) {
+    console.error("\nError:", error.message);
     process.exit(1);
   }
-  const options = parseArgs(args.slice(1));
-  await testModel(model, options);
 }
 main().catch(console.error);
 //# sourceMappingURL=cli.js.map
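
Note the asymmetry in agentChatCompletions: it always sends stream: true to the API (SSE on the wire), and when the caller passes stream: false it buffers the chunks through parseAgentNonStreamingResponse into a single chat.completion object carrying session_id, msg_id, and any generated artifacts. A sketch of a buffered agent call with artifact handling, under the same assumption that the helpers are exported from the package entry point (import specifier illustrative; seq: 0 mirrors the CLI, whose exact semantics this diff does not document):

    import { initSeacloud, agentChatCompletions, createTextMessage } from "seacloud-sdk";

    initSeacloud(process.env.API_SERVICE_TOKEN, { timeout: 3e5 });

    // stream: false buffers the SSE stream into one aggregated response.
    const first = await agentChatCompletions({
      agent_id: "seagen_agent",
      model: "gpt-4o",
      messages: [createTextMessage("user", "Generate an image of a sunset")],
      stream: false,
      seq: 0
    });
    console.log(first.choices[0].message.content);
    for (const artifact of first.artifacts ?? []) {
      console.log(artifact.name, artifact.url); // e.g. generated image URLs
    }

    // Reuse session_id for a follow-up turn in the same conversation.
    const second = await agentChatCompletions({
      agent_id: "seagen_agent",
      model: "gpt-4o",
      messages: [createTextMessage("user", "Make it more orange")],
      stream: false,
      session_id: first.session_id,
      seq: 0
    });
    console.log(second.choices[0].message.content);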