@wix/auto_sdk_ai-gateway_prompts 1.0.11 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/cjs/index.d.ts +1 -1
- package/build/cjs/index.js +130 -0
- package/build/cjs/index.js.map +1 -1
- package/build/cjs/index.typings.d.ts +271 -1
- package/build/cjs/index.typings.js +122 -0
- package/build/cjs/index.typings.js.map +1 -1
- package/build/cjs/meta.d.ts +258 -2
- package/build/cjs/meta.js +109 -0
- package/build/cjs/meta.js.map +1 -1
- package/build/es/index.d.mts +1 -1
- package/build/es/index.mjs +126 -0
- package/build/es/index.mjs.map +1 -1
- package/build/es/index.typings.d.mts +271 -1
- package/build/es/index.typings.mjs +118 -0
- package/build/es/index.typings.mjs.map +1 -1
- package/build/es/meta.d.mts +258 -2
- package/build/es/meta.mjs +105 -0
- package/build/es/meta.mjs.map +1 -1
- package/build/internal/cjs/index.d.ts +13 -5
- package/build/internal/cjs/index.js +130 -0
- package/build/internal/cjs/index.js.map +1 -1
- package/build/internal/cjs/index.typings.d.ts +279 -3
- package/build/internal/cjs/index.typings.js +122 -0
- package/build/internal/cjs/index.typings.js.map +1 -1
- package/build/internal/cjs/meta.d.ts +258 -2
- package/build/internal/cjs/meta.js +109 -0
- package/build/internal/cjs/meta.js.map +1 -1
- package/build/internal/es/index.d.mts +13 -5
- package/build/internal/es/index.mjs +126 -0
- package/build/internal/es/index.mjs.map +1 -1
- package/build/internal/es/index.typings.d.mts +279 -3
- package/build/internal/es/index.typings.mjs +118 -0
- package/build/internal/es/index.typings.mjs.map +1 -1
- package/build/internal/es/meta.d.mts +258 -2
- package/build/internal/es/meta.mjs +105 -0
- package/build/internal/es/meta.mjs.map +1 -1
- package/package.json +2 -2
package/build/cjs/meta.js
CHANGED
@@ -31,6 +31,7 @@ __export(meta_exports, {
   CreatePredictionModelOriginal: () => CreatePredictionModel,
   DynamicRetrievalConfigModeOriginal: () => DynamicRetrievalConfigMode,
   EditImageWithPromptRequestModelOriginal: () => EditImageWithPromptRequestModel,
+  ElevenLabsTextToSpeechModelOriginal: () => ElevenLabsTextToSpeechModel,
   FinishReasonOriginal: () => FinishReason,
   GatewayMessageDefinitionRoleOriginal: () => GatewayMessageDefinitionRole,
   GenerateAnImageModelOriginal: () => GenerateAnImageModel,
@@ -74,6 +75,7 @@ __export(meta_exports, {
   ResponsesModelOriginal: () => ResponsesModel,
   RoleOriginal: () => Role,
   SamplerOriginal: () => Sampler,
+  SpeechModelOriginal: () => SpeechModel,
   StylePresetOriginal: () => StylePreset,
   TextBisonModelOriginal: () => TextBisonModel,
   TextToImageRequestModelOriginal: () => TextToImageRequestModel,
@@ -91,9 +93,11 @@ __export(meta_exports, {
   V1ResponseTypeTypeOriginal: () => V1ResponseTypeType,
   V1ResponsesModelOriginal: () => V1ResponsesModel,
   V1ToolChoiceTypeOriginal: () => V1ToolChoiceType,
+  V1VideoModelOriginal: () => V1VideoModel,
   VideoGenModelOriginal: () => VideoGenModel,
   VideoModelOriginal: () => VideoModel,
   WebhookIdentityTypeOriginal: () => WebhookIdentityType,
+  generateAudioStreamed: () => generateAudioStreamed2,
   generateContentByPromptObject: () => generateContentByPromptObject2,
   generateContentByPromptObjectAsync: () => generateContentByPromptObjectAsync2,
   generateTextByPromptObjectStreamed: () => generateTextByPromptObjectStreamed2
@@ -845,6 +849,70 @@ function generateContentByPromptObjectAsync(payload) {
   }
   return __generateContentByPromptObjectAsync;
 }
+function generateAudioStreamed(payload) {
+  function __generateAudioStreamed({ host }) {
+    const serializedData = (0, import_transform_paths.transformPaths)(payload, [
+      {
+        transformFn: import_float.transformSDKFloatToRESTFloat,
+        paths: [
+          { path: "openAiCreateSpeechRequest.speed" },
+          { path: "elevenlabsTextToSpeechRequest.voiceSettings.style" },
+          { path: "elevenlabsTextToSpeechRequest.voiceSettings.stability" },
+          {
+            path: "elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost"
+          }
+        ]
+      }
+    ]);
+    const metadata = {
+      entityFqdn: "wix.api_infra.v1.prompt_proxy",
+      method: "POST",
+      methodFqn: "wix.api_infra.v1.WixAiExternalGateway.GenerateAudioStreamed",
+      packageName: PACKAGE_NAME,
+      migrationOptions: {
+        optInTransformResponse: true
+      },
+      url: resolveWixApiInfraV1WixAiExternalGatewayUrl({
+        protoPath: "/v1/generate-audio-streamed",
+        data: serializedData,
+        host
+      }),
+      data: serializedData,
+      transformResponse: (payload2) => (0, import_transform_paths.transformPaths)(payload2, [
+        {
+          transformFn: import_bytes.transformRESTBytesToSDKBytes,
+          paths: [
+            { path: "openAiSpeechChunk.content" },
+            { path: "elevenlabsSpeechChunk.audioBase64" }
+          ]
+        },
+        {
+          transformFn: import_float2.transformRESTFloatToSDKFloat,
+          paths: [
+            {
+              path: "elevenlabsSpeechChunk.alignment.characterStartTimesSeconds",
+              isRepeated: true
+            },
+            {
+              path: "elevenlabsSpeechChunk.alignment.characterEndTimesSeconds",
+              isRepeated: true
+            },
+            {
+              path: "elevenlabsSpeechChunk.normalizedAlignment.characterStartTimesSeconds",
+              isRepeated: true
+            },
+            {
+              path: "elevenlabsSpeechChunk.normalizedAlignment.characterEndTimesSeconds",
+              isRepeated: true
+            }
+          ]
+        }
+      ])
+    };
+    return metadata;
+  }
+  return __generateAudioStreamed;
+}
 
 // src/api-infra-v1-prompt-proxy-prompts.types.ts
 var OpenaiproxyV1ChatCompletionMessageMessageRole = /* @__PURE__ */ ((OpenaiproxyV1ChatCompletionMessageMessageRole2) => {
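The new `generateAudioStreamed` builder mirrors the existing request builders in this module: `generateAudioStreamed(payload)` captures the request body, and the returned `__generateAudioStreamed({ host })` serializes SDK floats, resolves the gateway URL, and attaches a response transform that converts REST bytes and repeated floats back to SDK types. A minimal standalone sketch of that curried pattern; the stubbed `resolveUrl`, the payload fields, and the host value are illustrative assumptions, not the package's internals:

```js
// Sketch of the curried request-builder pattern shown in the hunk above; not the package's code.
// resolveUrl stands in for resolveWixApiInfraV1WixAiExternalGatewayUrl, which is not shown here.
const resolveUrl = ({ protoPath, host }) => `https://${host}${protoPath}`;

function buildAudioRequest(payload) {
  // Step 1: capture the payload; step 2: resolve against a concrete host context.
  return function withContext({ host }) {
    return {
      method: "POST",
      url: resolveUrl({ protoPath: "/v1/generate-audio-streamed", host }),
      data: payload // the real builder also applies float/bytes transforms to payload and response
    };
  };
}

// Two-step invocation, mirroring generateAudioStreamed(payload)({ host }) in the diff.
const options = buildAudioRequest({ openAiCreateSpeechRequest: { speed: 1 } })({
  host: "www.wixapis.com" // placeholder host; the real value comes from the SDK context
});
console.log(options.method, options.url); // POST https://www.wixapis.com/v1/generate-audio-streamed
```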
@@ -1489,6 +1557,12 @@ var ResponsesMessageRole = /* @__PURE__ */ ((ResponsesMessageRole2) => {
   ResponsesMessageRole2["DEVELOPER"] = "DEVELOPER";
   return ResponsesMessageRole2;
 })(ResponsesMessageRole || {});
+var V1VideoModel = /* @__PURE__ */ ((V1VideoModel2) => {
+  V1VideoModel2["UNKNOWN_VIDEO_MODEL"] = "UNKNOWN_VIDEO_MODEL";
+  V1VideoModel2["SORA_2"] = "SORA_2";
+  V1VideoModel2["SORA_2_PRO"] = "SORA_2_PRO";
+  return V1VideoModel2;
+})(V1VideoModel || {});
 var GatewayMessageDefinitionRole = /* @__PURE__ */ ((GatewayMessageDefinitionRole2) => {
   GatewayMessageDefinitionRole2["UNKNOWN"] = "UNKNOWN";
   GatewayMessageDefinitionRole2["USER"] = "USER";
@@ -1498,6 +1572,19 @@ var GatewayMessageDefinitionRole = /* @__PURE__ */ ((GatewayMessageDefinitionRol
   GatewayMessageDefinitionRole2["DEVELOPER"] = "DEVELOPER";
   return GatewayMessageDefinitionRole2;
 })(GatewayMessageDefinitionRole || {});
+var SpeechModel = /* @__PURE__ */ ((SpeechModel2) => {
+  SpeechModel2["UNKNOWN_SPEECH_MODEL"] = "UNKNOWN_SPEECH_MODEL";
+  SpeechModel2["TTS_1"] = "TTS_1";
+  SpeechModel2["TTS_1_HD"] = "TTS_1_HD";
+  return SpeechModel2;
+})(SpeechModel || {});
+var ElevenLabsTextToSpeechModel = /* @__PURE__ */ ((ElevenLabsTextToSpeechModel2) => {
+  ElevenLabsTextToSpeechModel2["UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL"] = "UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL";
+  ElevenLabsTextToSpeechModel2["ELEVEN_MULTILINGUAL_V2"] = "ELEVEN_MULTILINGUAL_V2";
+  ElevenLabsTextToSpeechModel2["ELEVEN_FLASH_V2_5"] = "ELEVEN_FLASH_V2_5";
+  ElevenLabsTextToSpeechModel2["ELEVEN_FLASH_V2"] = "ELEVEN_FLASH_V2";
+  return ElevenLabsTextToSpeechModel2;
+})(ElevenLabsTextToSpeechModel || {});
 var WebhookIdentityType = /* @__PURE__ */ ((WebhookIdentityType2) => {
   WebhookIdentityType2["UNKNOWN"] = "UNKNOWN";
   WebhookIdentityType2["ANONYMOUS_VISITOR"] = "ANONYMOUS_VISITOR";
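The new enums (`V1VideoModel`, `SpeechModel`, `ElevenLabsTextToSpeechModel`) follow the same compiled IIFE pattern as the existing ones and are plain string maps at runtime. A standalone mirror of one of them, copied from the hunk above so the snippet runs without the package, just to show how the generated values behave:

```js
// Mirror of the SpeechModel IIFE from the hunk above, so this runs without importing the package.
var SpeechModel = /* @__PURE__ */ ((SpeechModel2) => {
  SpeechModel2["UNKNOWN_SPEECH_MODEL"] = "UNKNOWN_SPEECH_MODEL";
  SpeechModel2["TTS_1"] = "TTS_1";
  SpeechModel2["TTS_1_HD"] = "TTS_1_HD";
  return SpeechModel2;
})(SpeechModel || {});

// The values are their own names, so they serialize to JSON unchanged.
console.log(SpeechModel.TTS_1_HD);       // "TTS_1_HD"
console.log(Object.values(SpeechModel)); // ["UNKNOWN_SPEECH_MODEL", "TTS_1", "TTS_1_HD"]
```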
@@ -1566,6 +1653,24 @@ function generateContentByPromptObjectAsync2() {
     __originalResponseType: null
   };
 }
+function generateAudioStreamed2() {
+  const payload = {};
+  const getRequestOptions = generateAudioStreamed(payload);
+  const getUrl = (context) => {
+    const { url } = getRequestOptions(context);
+    return url;
+  };
+  return {
+    getUrl,
+    httpMethod: "POST",
+    path: "/v1/generate-audio-streamed",
+    pathParams: {},
+    __requestType: null,
+    __originalRequestType: null,
+    __responseType: null,
+    __originalResponseType: null
+  };
+}
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   AnthropicModelOriginal,
@@ -1579,6 +1684,7 @@ function generateContentByPromptObjectAsync2() {
   CreatePredictionModelOriginal,
   DynamicRetrievalConfigModeOriginal,
   EditImageWithPromptRequestModelOriginal,
+  ElevenLabsTextToSpeechModelOriginal,
   FinishReasonOriginal,
   GatewayMessageDefinitionRoleOriginal,
   GenerateAnImageModelOriginal,
@@ -1622,6 +1728,7 @@ function generateContentByPromptObjectAsync2() {
   ResponsesModelOriginal,
   RoleOriginal,
   SamplerOriginal,
+  SpeechModelOriginal,
   StylePresetOriginal,
   TextBisonModelOriginal,
   TextToImageRequestModelOriginal,
@@ -1639,9 +1746,11 @@ function generateContentByPromptObjectAsync2() {
   V1ResponseTypeTypeOriginal,
   V1ResponsesModelOriginal,
   V1ToolChoiceTypeOriginal,
+  V1VideoModelOriginal,
   VideoGenModelOriginal,
   VideoModelOriginal,
   WebhookIdentityTypeOriginal,
+  generateAudioStreamed,
   generateContentByPromptObject,
   generateContentByPromptObjectAsync,
   generateTextByPromptObjectStreamed
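Net public surface added in 1.0.12, as reflected across both the cjs and es builds: the `generateAudioStreamed` method descriptor (backed by `generateAudioStreamed2` above) plus the `V1VideoModel`, `SpeechModel`, and `ElevenLabsTextToSpeechModel` enums. A hedged sketch of inspecting the new descriptor; the require specifier and host value are assumptions, since the package.json changes that wire up the entry points are not shown in this diff:

```js
// Assumption: the meta build shown above is reachable via a "meta" entry of the package;
// the exact subpath export is not visible in this diff.
const meta = require("@wix/auto_sdk_ai-gateway_prompts/meta");

const descriptor = meta.generateAudioStreamed();
console.log(descriptor.httpMethod); // "POST"
console.log(descriptor.path);       // "/v1/generate-audio-streamed"

// getUrl delegates to the internal builder and resolves the endpoint for a given context.
const url = descriptor.getUrl({ host: "www.wixapis.com" }); // placeholder host, assumption
console.log(url);
```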