seacloud-sdk 0.11.10 → 0.12.1

This diff compares publicly available package versions as published to their respective public registries; it is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -12167,6 +12167,13 @@ interface LlmChatCompletionsParams {
  * A unique identifier representing your end-user
  */
  user?: string;
+ /**
+ * Request timeout in milliseconds
+ * Overrides the global timeout set in initSeacloud()
+ * @default 30000 (from global config)
+ * @example 60000 for 60 seconds timeout
+ */
+ timeout?: number;
  }
  /**
  * Usage statistics for the completion
@@ -12331,6 +12338,18 @@ interface ChatCompletionChunk {
  * console.log(response.choices[0].message.content);
  * ```
  *
+ * @example With custom timeout
+ * ```typescript
+ * // Set 60 seconds timeout for this specific call
+ * const response = await llmChatCompletions({
+ *   model: 'deepseek-v3.1',
+ *   messages: [
+ *     { role: 'user', content: 'Write a long article' }
+ *   ],
+ *   timeout: 60000 // 60 seconds
+ * });
+ * ```
+ *
  * @param params Request parameters
  * @returns Chat completion response
  */
@@ -12401,7 +12420,7 @@ declare function llmChatCompletions(params: LlmChatCompletionsParams & {
  * - For tool calls (image/video generation), streaming is RECOMMENDED
  *
  * 4. AUTHENTICATION:
- * - Requires 'X-Project' header (configured via initSeacloud({ xProject: 'SeaArt' }))
+ * - Requires 'X-Project' header (configured via initSeacloud({ xProject: 'SeaVerse' }))
  * - Uses Bearer token authentication
  *
  * 5. SESSION MANAGEMENT:
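
The authentication note changes the example project from 'SeaArt' to 'SeaVerse'. Below is a minimal setup sketch tied to this note; the `apiKey` and global `timeout` option names are assumptions inferred from `config.apiKey` and `config.timeout` in dist/index.js and are not confirmed by this diff.

```typescript
// Sketch only: `apiKey` and `timeout` option names are assumptions inferred from
// config.apiKey / config.timeout in dist/index.js; xProject: 'SeaVerse' is confirmed above.
import { initSeacloud } from 'seacloud-sdk';

initSeacloud({
  apiKey: process.env.SEACLOUD_API_KEY,
  xProject: 'SeaVerse', // sets the required 'X-Project' header
  timeout: 30000        // assumed global default, per the @default 30000 note
});
// The per-request `timeout` added in 0.12.1 overrides this global value for a single call.
```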
@@ -12650,6 +12669,14 @@ interface AgentChatCompletionsParams {
  * Top-p nucleus sampling (0-1)
  */
  top_p?: number;
+ /**
+ * Request timeout in milliseconds
+ * Overrides the global timeout set in initSeacloud()
+ * @default 30000 (from global config)
+ * @recommended 120000 or more for agent operations with tool calls
+ * @example 120000 for 120 seconds timeout
+ */
+ timeout?: number;
  }
  /**
  * Artifact (generated file)
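
The same `timeout` field lands on AgentChatCompletionsParams, with a higher recommended value because agent runs can include tool calls. A hedged sketch follows; apart from `top_p` and `timeout`, the field names used here (`model`, `messages`) are assumptions mirroring the LLM endpoint and are not shown in this hunk.

```typescript
// Sketch only: `model` and `messages` are assumed fields; only `top_p` and
// `timeout` on AgentChatCompletionsParams are visible in this diff.
const result = await agentChatCompletions({
  model: 'deepseek-v3.1',
  messages: [{ role: 'user', content: 'Generate a poster and describe it' }],
  top_p: 0.9,
  timeout: 120000 // 120 seconds, as recommended for agent operations with tool calls
});
```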
package/dist/index.js CHANGED
@@ -1598,10 +1598,17 @@ async function googleGeminiImage(params) {
  pollingOptions
  );
  const resources = [];
+ let firstTextResource = null;
  if (result.output) {
  for (const item of result.output) {
  if (item.content) {
  for (const resource of item.content) {
+ if (resource.type === "text") {
+ if (!firstTextResource) {
+ firstTextResource = resource;
+ }
+ continue;
+ }
  validateResourceUrl(resource, "image");
  resources.push({
  type: resource.type || "unknown",
@@ -1614,6 +1621,10 @@ async function googleGeminiImage(params) {
  }
  }
  }
+ if (resources.length === 0) {
+ const errorResource = firstTextResource || { type: "text", url: "" };
+ validateResourceUrl(errorResource, "image");
+ }
  return resources;
  }
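
In dist/index.js, googleGeminiImage now skips text parts while collecting image resources, remembers the first text part, and, when no image resources were produced, runs that text part through validateResourceUrl so the failure carries the model's text (for example a refusal) instead of silently returning an empty array. A hedged usage sketch follows, with a hypothetical params shape since googleGeminiImage's parameters are not part of this diff.

```typescript
// Hypothetical call shape: the real googleGeminiImage params are not shown in this diff.
try {
  const resources = await googleGeminiImage({ prompt: 'A watercolor lighthouse at dusk' });
  console.log(resources.map((r) => r.url));
} catch (error) {
  // As of 0.12.1, a text-only response (e.g. a safety refusal) surfaces here
  // through validateResourceUrl rather than yielding an empty resources array.
  console.error(error);
}
```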
@@ -5335,8 +5346,9 @@ async function llmChatCompletions(params) {
  const config = client.getConfig();
  const url = `${config.baseUrl}/llm/chat/completions`;
  const token = await getApiToken(config.apiKey);
+ const timeoutMs = params.timeout ?? config.timeout;
  const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), config.timeout);
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
  const response = await config.fetch(url, {
  method: "POST",
@@ -5367,7 +5379,7 @@ async function llmChatCompletions(params) {
  throw error;
  }
  if (error.name === "AbortError") {
- throw new SeacloudError(`Request timeout after ${config.timeout}ms`);
+ throw new SeacloudError(`Request timeout after ${timeoutMs}ms`);
  }
  throw new SeacloudError(
  `Request failed: ${error.message}`,
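
The runtime change pairs with the new type: the effective timeout is `params.timeout ?? config.timeout`, and the AbortError message now reports that per-call value. A hedged sketch of catching it; whether SeacloudError is exported from the package entry point is an assumption.

```typescript
// Sketch only: assumes SeacloudError is exported from the package entry point.
import { llmChatCompletions, SeacloudError } from 'seacloud-sdk';

try {
  const response = await llmChatCompletions({
    model: 'deepseek-v3.1',
    messages: [{ role: 'user', content: 'Summarize the release notes' }],
    timeout: 5000 // per-call override; falls back to config.timeout when omitted
  });
  console.log(response.choices[0].message.content);
} catch (error) {
  if (error instanceof SeacloudError) {
    console.error(error.message); // e.g. "Request timeout after 5000ms"
  } else {
    throw error;
  }
}
```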
@@ -5425,8 +5437,9 @@ async function agentChatCompletions(params) {
  stream: true
  // Always request SSE from API
  };
+ const timeoutMs = params.timeout ?? config.timeout;
  const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), config.timeout);
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
  const headers = {
  "Content-Type": "application/json",
@@ -5460,7 +5473,7 @@ async function agentChatCompletions(params) {
  throw error;
  }
  if (error.name === "AbortError") {
- throw new SeacloudError(`Request timeout after ${config.timeout}ms`);
+ throw new SeacloudError(`Request timeout after ${timeoutMs}ms`);
  }
  throw new SeacloudError(
  `Request failed: ${error.message}`,