@threaded/ai 1.0.12 → 1.0.14

package/dist/index.js CHANGED
@@ -232,6 +232,7 @@ var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, confi
  if (config?.outputFormat) body.output_format = config.outputFormat;
  if (config?.outputCompression != null) body.output_compression = config.outputCompression;
  if (config?.background) body.background = config.background;
+ if (config?.moderation) body.moderation = config.moderation;
  }
  const response = await fetch(endpoint, {
  method: "POST",
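
The new moderation option is only forwarded when the caller sets it. A minimal usage sketch, not taken from the package docs: the model name and prompt are illustrative, and it assumes the first argument of the exported generateImage is a model identifier.

// Hypothetical usage: moderation is forwarded to the request body as body.moderation.
const image = await generateImage("gpt-image-1.5", "a lighthouse at dusk", {
  size: "1024x1024",
  quality: "high",
  moderation: "low" // IMAGE_MODEL_SCHEMA (added later in this diff) allows "auto" | "low"
});
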
@@ -263,6 +264,9 @@ var generateGoogle = async (modelName, prompt, apiKey, config) => {
  if (config?.aspectRatio) {
  body.generationConfig.aspectRatio = config.aspectRatio;
  }
+ if (config?.imageSize) {
+ body.generationConfig.imageSize = config.imageSize;
+ }
  const response = await fetch(endpoint, {
  method: "POST",
  headers: {
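
For the Google path, imageSize is copied into generationConfig.imageSize. A hedged sketch under the same assumptions; the model name is chosen because it declares imageSize support in the schema added later in this diff.

// Hypothetical usage: request a 2K output from a Gemini image model.
const image = await generateImage("gemini-3-pro-image-preview", "a paper crane on a desk", {
  aspectRatio: "16:9",
  imageSize: "2K" // schema values: "1K" | "2K", default "1K"
});
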
@@ -315,6 +319,100 @@ var generateImage = async (model2, prompt, config) => {
  }
  };

+ // src/image-model-schema.ts
+ var IMAGE_MODEL_SCHEMA = {
+ openai: {
+ "dall-e-3": {
+ size: {
+ values: ["1024x1024", "1024x1792", "1792x1024"],
+ default: "1024x1024",
+ description: "Image dimensions"
+ },
+ quality: {
+ values: ["standard", "hd"],
+ default: "standard",
+ description: "Image quality level"
+ },
+ style: {
+ values: ["vivid", "natural"],
+ default: "vivid",
+ description: "Image style"
+ }
+ },
+ "gpt-image-1.5": {
+ size: {
+ values: ["1024x1024", "1536x1024", "1024x1536", "auto"],
+ default: "auto",
+ description: "Image dimensions"
+ },
+ quality: {
+ values: ["low", "medium", "high", "auto"],
+ default: "auto",
+ description: "Image quality level"
+ },
+ background: {
+ values: ["transparent", "opaque", "auto"],
+ default: "auto",
+ description: "Background type"
+ },
+ moderation: {
+ values: ["auto", "low"],
+ default: "auto",
+ description: "Content moderation level"
+ }
+ }
+ },
+ google: {
+ "gemini-2.5-flash-image": {
+ aspectRatio: {
+ values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
+ default: "1:1",
+ description: "Image aspect ratio"
+ }
+ },
+ "gemini-3-pro-image-preview": {
+ aspectRatio: {
+ values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
+ default: "1:1",
+ description: "Image aspect ratio"
+ },
+ imageSize: {
+ values: ["1K", "2K"],
+ default: "1K",
+ description: "Output image size"
+ }
+ },
+ "nano-banana-pro-preview": {
+ aspectRatio: {
+ values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
+ default: "1:1",
+ description: "Image aspect ratio"
+ }
+ }
+ },
+ xai: {
+ "grok-2-image-1212": {
+ size: {
+ values: ["1024x1024"],
+ default: "1024x1024",
+ description: "Image dimensions"
+ }
+ }
+ }
+ };
+ function getModelConfig(provider, model2) {
+ return IMAGE_MODEL_SCHEMA[provider]?.[model2] || null;
+ }
+ function getDefaultConfig(provider, model2) {
+ const schema = getModelConfig(provider, model2);
+ if (!schema) return {};
+ const defaults = {};
+ for (const [key, option] of Object.entries(schema)) {
+ defaults[key] = option.default;
+ }
+ return defaults;
+ }
+
  // src/providers/openai.ts
  var getApiKey2 = (configApiKey) => {
  if (configApiKey) return configApiKey;
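
The two new helpers are plain lookups over IMAGE_MODEL_SCHEMA, so their behavior can be read off the code above. A short sketch of the return values implied by that schema:

// Derived from IMAGE_MODEL_SCHEMA as added in this diff.
getModelConfig("openai", "dall-e-3");        // { size: {...}, quality: {...}, style: {...} }
getModelConfig("openai", "no-such-model");   // null
getDefaultConfig("openai", "gpt-image-1.5"); // { size: "auto", quality: "auto", background: "auto", moderation: "auto" }
getDefaultConfig("unknown", "anything");     // {} (unknown provider or model)
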
@@ -691,23 +789,27 @@ var callGoogle = async (config, ctx) => {
  for (let i = 0; i < ctx.history.length; i++) {
  const msg2 = ctx.history[i];
  if (msg2.role === "assistant") {
- const parts = [];
+ const parts2 = [];
  if (msg2.content) {
- parts.push({ text: msg2.content });
+ parts2.push({ text: msg2.content });
  }
  if (msg2.tool_calls?.length) {
  for (const tc of msg2.tool_calls) {
  toolCallMap.set(tc.id, tc.function.name);
- parts.push({
+ const part = {
  functionCall: {
  name: tc.function.name,
  args: JSON.parse(tc.function.arguments)
  }
- });
+ };
+ if (tc.thoughtSignature) {
+ part.thoughtSignature = tc.thoughtSignature;
+ }
+ parts2.push(part);
  }
  }
- if (parts.length > 0) {
- contents.push({ role: "model", parts });
+ if (parts2.length > 0) {
+ contents.push({ role: "model", parts: parts2 });
  }
  } else if (msg2.role === "tool") {
  const responseParts = [];
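
This change preserves Gemini thought signatures when conversation history is replayed: a stored tool call that carries thoughtSignature gets it re-attached to the corresponding functionCall part. A sketch of the mapping; the message shape is inferred from this diff, and names and values are illustrative.

// Hypothetical assistant history entry with a preserved thought signature...
const historyEntry = {
  role: "assistant",
  content: "",
  tool_calls: [{
    id: "abc1234",
    type: "function",
    function: { name: "get_weather", arguments: "{\"city\":\"Oslo\"}" },
    thoughtSignature: "<opaque signature from an earlier Gemini response>"
  }]
};
// ...is replayed as a "model" content entry whose functionCall part keeps the signature:
// { role: "model", parts: [{ functionCall: { name: "get_weather", args: { city: "Oslo" } },
//   thoughtSignature: "<opaque signature from an earlier Gemini response>" }] }
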
@@ -776,22 +878,33 @@ var callGoogle = async (config, ctx) => {
  }
  const data = await response.json();
  const candidate = data.candidates[0];
- const part = candidate.content.parts[0];
+ const parts = candidate.content.parts || [];
  const msg = {
  role: "assistant",
- content: part.text || ""
+ content: ""
  };
- if (part.functionCall) {
- msg.tool_calls = [
- {
+ const toolCalls = [];
+ for (const part of parts) {
+ if (part.text) {
+ msg.content += part.text;
+ }
+ if (part.functionCall) {
+ const tc = {
  id: Math.random().toString(36).substring(2, 9),
  type: "function",
  function: {
  name: part.functionCall.name,
  arguments: JSON.stringify(part.functionCall.args)
  }
+ };
+ if (part.thoughtSignature) {
+ tc.thoughtSignature = part.thoughtSignature;
  }
- ];
+ toolCalls.push(tc);
+ }
+ }
+ if (toolCalls.length > 0) {
+ msg.tool_calls = toolCalls;
  }
  return {
  ...ctx,
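
Previously only candidate.content.parts[0] was read; now every part contributes, so a candidate that mixes text and function calls yields both content and tool_calls, and thought signatures ride along on each tool call. A sketch with an illustrative payload:

// Hypothetical Gemini candidate with mixed parts...
const exampleParts = [
  { text: "Checking the forecast. " },
  { functionCall: { name: "get_weather", args: { city: "Oslo" } }, thoughtSignature: "<opaque>" }
];
// ...now produces an assistant message roughly like:
// {
//   role: "assistant",
//   content: "Checking the forecast. ",
//   tool_calls: [{ id: "<random>", type: "function",
//     function: { name: "get_weather", arguments: "{\"city\":\"Oslo\"}" },
//     thoughtSignature: "<opaque>" }]
// }
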
@@ -831,14 +944,18 @@ var handleGoogleStream = async (response, ctx) => {
  }
  }
  if (part?.functionCall) {
- toolCalls.push({
+ const tc = {
  id: Math.random().toString(36).substring(2, 9),
  type: "function",
  function: {
  name: part.functionCall.name,
  arguments: JSON.stringify(part.functionCall.args)
  }
- });
+ };
+ if (part.thoughtSignature) {
+ tc.thoughtSignature = part.thoughtSignature;
+ }
+ toolCalls.push(tc);
  }
  }
  } catch (e) {
@@ -1623,6 +1740,7 @@ var rateLimited = (config) => (fn) => {
  });
  };
  export {
+ IMAGE_MODEL_SCHEMA,
  Inherit,
  appendToLastRequest,
  compose,
@@ -1635,7 +1753,9 @@ export {
  everyNTokens,
  generateApprovalToken,
  generateImage,
+ getDefaultConfig,
  getKey,
+ getModelConfig,
  getOrCreateThread,
  isStandardSchema,
  maxCalls,
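
The new exports sit alongside the existing ones, so consumers can pull the schema and helpers directly from the package. A minimal import sketch, assuming standard ESM usage:

// Hypothetical consumer code using the 1.0.14 additions.
import { IMAGE_MODEL_SCHEMA, getModelConfig, getDefaultConfig, generateImage } from "@threaded/ai";

const options = getModelConfig("google", "gemini-3-pro-image-preview");
const defaults = getDefaultConfig("google", "gemini-3-pro-image-preview"); // { aspectRatio: "1:1", imageSize: "1K" }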