oci-generativeaiinference 2.114.1 → 2.116.0
- package/lib/model/assistant-message.d.ts +4 -0
- package/lib/model/assistant-message.js.map +1 -1
- package/lib/model/chat-choice.d.ts +2 -1
- package/lib/model/chat-choice.js +4 -2
- package/lib/model/chat-choice.js.map +1 -1
- package/lib/model/cohere-chat-request.d.ts +2 -1
- package/lib/model/cohere-chat-request.js.map +1 -1
- package/lib/model/developer-message.d.ts +35 -0
- package/lib/model/developer-message.js +57 -0
- package/lib/model/developer-message.js.map +1 -0
- package/lib/model/function-call.d.ts +1 -1
- package/lib/model/generic-chat-request.d.ts +41 -2
- package/lib/model/generic-chat-request.js +29 -0
- package/lib/model/generic-chat-request.js.map +1 -1
- package/lib/model/generic-chat-response.d.ts +1 -0
- package/lib/model/generic-chat-response.js +4 -2
- package/lib/model/generic-chat-response.js.map +1 -1
- package/lib/model/index.d.ts +16 -0
- package/lib/model/index.js +18 -2
- package/lib/model/index.js.map +1 -1
- package/lib/model/json-object-response-format.d.ts +31 -0
- package/lib/model/json-object-response-format.js +61 -0
- package/lib/model/json-object-response-format.js.map +1 -0
- package/lib/model/json-schema-response-format.d.ts +32 -0
- package/lib/model/json-schema-response-format.js +69 -0
- package/lib/model/json-schema-response-format.js.map +1 -0
- package/lib/model/message.js +4 -0
- package/lib/model/message.js.map +1 -1
- package/lib/model/prediction.d.ts +30 -0
- package/lib/model/prediction.js +75 -0
- package/lib/model/prediction.js.map +1 -0
- package/lib/model/response-format.d.ts +29 -0
- package/lib/model/response-format.js +83 -0
- package/lib/model/response-format.js.map +1 -0
- package/lib/model/response-json-schema.d.ts +45 -0
- package/lib/model/response-json-schema.js +36 -0
- package/lib/model/response-json-schema.js.map +1 -0
- package/lib/model/static-content.d.ts +36 -0
- package/lib/model/static-content.js +69 -0
- package/lib/model/static-content.js.map +1 -0
- package/lib/model/text-response-format.d.ts +31 -0
- package/lib/model/text-response-format.js +59 -0
- package/lib/model/text-response-format.js.map +1 -0
- package/lib/model/tool-choice-function.d.ts +1 -1
- package/lib/model/tool-message.d.ts +1 -1
- package/package.json +3 -3
package/lib/model/assistant-message.d.ts
CHANGED
@@ -30,6 +30,10 @@ export interface AssistantMessage extends model.Message {
    * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    */
   "name"?: string;
+  /**
+   * The refusal message by the assistant.
+   */
+  "refusal"?: string;
   /**
    * The tool calls generated by the model, such as function calls.
    */
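For orientation, here is a minimal sketch of how a caller might surface the new optional refusal field. The helper name and the "ASSISTANT" role check are assumptions layered on the declarations above, not SDK API:

import * as genai from "oci-generativeaiinference";

// Hypothetical helper: pull the refusal text, if any, out of a chat choice.
function getRefusal(choice: genai.models.ChatChoice): string | undefined {
  // Only ASSISTANT-role messages carry the new optional "refusal" field.
  return choice.message.role === "ASSISTANT"
    ? (choice.message as genai.models.AssistantMessage).refusal
    : undefined;
}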
package/lib/model/assistant-message.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/chat-choice.d.ts
CHANGED
@@ -26,7 +26,7 @@ export interface ChatChoice {
    * The index of the chat. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
    */
   "index": number;
-  "message": model.SystemMessage | model.AssistantMessage | model.UserMessage | model.ToolMessage;
+  "message": model.SystemMessage | model.AssistantMessage | model.UserMessage | model.ToolMessage | model.DeveloperMessage;
   /**
    * The reason why the model stopped generating tokens.
    * <p>
@@ -35,6 +35,7 @@ export interface ChatChoice {
    */
   "finishReason": string;
   "logprobs"?: model.Logprobs;
+  "usage"?: model.Usage;
 }
 export declare namespace ChatChoice {
   function getJsonObj(obj: ChatChoice): object;
package/lib/model/chat-choice.js
CHANGED
@@ -45,7 +45,8 @@ var ChatChoice;
     function getJsonObj(obj) {
         const jsonObj = Object.assign(Object.assign({}, obj), {
             "message": obj.message ? model.Message.getJsonObj(obj.message) : undefined,
-            "logprobs": obj.logprobs ? model.Logprobs.getJsonObj(obj.logprobs) : undefined
+            "logprobs": obj.logprobs ? model.Logprobs.getJsonObj(obj.logprobs) : undefined,
+            "usage": obj.usage ? model.Usage.getJsonObj(obj.usage) : undefined
         });
         return jsonObj;
     }
@@ -53,7 +54,8 @@ var ChatChoice;
     function getDeserializedJsonObj(obj) {
         const jsonObj = Object.assign(Object.assign({}, obj), {
             "message": obj.message ? model.Message.getDeserializedJsonObj(obj.message) : undefined,
-            "logprobs": obj.logprobs ? model.Logprobs.getDeserializedJsonObj(obj.logprobs) : undefined
+            "logprobs": obj.logprobs ? model.Logprobs.getDeserializedJsonObj(obj.logprobs) : undefined,
+            "usage": obj.usage ? model.Usage.getDeserializedJsonObj(obj.usage) : undefined
         });
         return jsonObj;
     }
package/lib/model/chat-choice.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/cohere-chat-request.d.ts
CHANGED
@@ -61,7 +61,8 @@ export interface CohereChatRequest extends model.BaseChatRequest {
   "isStream"?: boolean;
   "streamOptions"?: model.StreamOptions;
   /**
-   * The maximum number of output tokens that the model will generate for the response.
+   * The maximum number of output tokens that the model will generate for the response. The token count of your prompt plus maxTokens must not exceed the model's context length. For on-demand inferencing, the response length is capped at 4,000 tokens for each run.
+   * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
    */
   "maxTokens"?: number;
   /**
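The tightened maxTokens contract is easy to honor when building the request; a minimal sketch, assuming message is the required prompt field of CohereChatRequest and "COHERE" is its apiFormat discriminator:

import * as genai from "oci-generativeaiinference";

const cohereRequest: genai.models.CohereChatRequest = {
  apiFormat: "COHERE",
  message: "Summarize the release notes in three bullets.",
  // Prompt tokens + maxTokens must fit in the model's context length;
  // on-demand inferencing additionally caps the response at 4,000 tokens.
  maxTokens: 600
};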
package/lib/model/cohere-chat-request.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/developer-message.d.ts
ADDED
@@ -0,0 +1,35 @@
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
+
+To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, developer messages replace the previous system messages.
+ */
+export interface DeveloperMessage extends model.Message {
+  /**
+   * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+   */
+  "name"?: string;
+  "role": string;
+}
+export declare namespace DeveloperMessage {
+  function getJsonObj(obj: DeveloperMessage, isParentJsonObj?: boolean): object;
+  const role = "DEVELOPER";
+  function getDeserializedJsonObj(obj: DeveloperMessage, isParentJsonObj?: boolean): object;
+}
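A hedged sketch of constructing the new DEVELOPER-role message. The content array and TEXT content shape follow the existing Message and TextContent models; "TEXT" as the content-type discriminator is an assumption carried over from them:

import * as genai from "oci-generativeaiinference";

const developerMessage: genai.models.DeveloperMessage = {
  role: genai.models.DeveloperMessage.role, // "DEVELOPER", per the namespace constant above
  name: "style-guide",
  content: [
    { type: "TEXT", text: "Answer formally and cite sources." } as genai.models.TextContent
  ]
};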
package/lib/model/developer-message.js
ADDED
@@ -0,0 +1,57 @@
+"use strict";
+/* … standard auto-generated service header, identical to developer-message.d.ts … */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DeveloperMessage = void 0;
+const model = __importStar(require("../model"));
+var DeveloperMessage;
+(function (DeveloperMessage) {
+    function getJsonObj(obj, isParentJsonObj) {
+        const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.Message.getJsonObj(obj))), {});
+        return jsonObj;
+    }
+    DeveloperMessage.getJsonObj = getJsonObj;
+    DeveloperMessage.role = "DEVELOPER";
+    function getDeserializedJsonObj(obj, isParentJsonObj) {
+        const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.Message.getDeserializedJsonObj(obj))), {});
+        return jsonObj;
+    }
+    DeveloperMessage.getDeserializedJsonObj = getDeserializedJsonObj;
+})(DeveloperMessage = exports.DeveloperMessage || (exports.DeveloperMessage = {}));
+//# sourceMappingURL=developer-message.js.map
package/lib/model/developer-message.js.map
ADDED
@@ -0,0 +1 @@
(new source map for developer-message.js; minified "mappings" payload)
package/lib/model/function-call.d.ts
CHANGED
@@ -23,7 +23,7 @@ import * as model from "../model";
  */
 export interface FunctionCall extends model.ToolCall {
   /**
-   * The name of the function to call.
+   * The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
    */
   "name"?: string;
   /**
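The documented name constraint translates to a simple pattern check; the regex below is one reading of the doc comment, not something the SDK exports:

// Letters, digits, underscores and dashes only, at most 64 characters.
const FUNCTION_NAME_PATTERN = /^[a-zA-Z0-9_-]{1,64}$/;

function isValidFunctionName(name: string): boolean {
  return FUNCTION_NAME_PATTERN.test(name);
}

console.log(isValidFunctionName("get_weather-v2")); // true
console.log(isValidFunctionName("get weather"));    // false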
package/lib/model/generic-chat-request.d.ts
CHANGED
@@ -26,10 +26,28 @@ export interface GenericChatRequest extends model.BaseChatRequest {
    * The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role ({@code USER} or the {@code CHATBOT}) and content.
    */
   "messages"?: Array<model.Message>;
+  /**
+   * Constrains effort on reasoning for reasoning models. Currently supported values are minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   *
+   */
+  "reasoningEffort"?: GenericChatRequest.ReasoningEffort;
+  /**
+   * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses.
+   *
+   */
+  "verbosity"?: GenericChatRequest.Verbosity;
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.
+   * <p>
+   Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.
+   *
+   */
+  "metadata"?: any;
   /**
    * Whether to stream back partial progress. If set to true, as tokens become available, they are sent as data-only server-sent events.
    */
   "isStream"?: boolean;
+  "streamOptions"?: model.StreamOptions;
   /**
    * The number of of generated texts that will be returned. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
    */
@@ -87,11 +105,15 @@ export interface GenericChatRequest extends model.BaseChatRequest {
    */
   "logProbs"?: number;
   /**
-   * The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus
-   * Not setting a value for maxTokens results in the possible use of model's full context length.
+   * The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus maxTokens must not exceed the model's context length. For on-demand inferencing, the response length is capped at 4,000 tokens for each run.
    * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
    */
   "maxTokens"?: number;
+  /**
+   * An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
+   * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "maxCompletionTokens"?: number;
   /**
    * Modifies the likelihood of specified tokens that appear in the completion.
    * <p>
@@ -99,7 +121,13 @@ export interface GenericChatRequest extends model.BaseChatRequest {
    *
    */
   "logitBias"?: any;
+  "prediction"?: model.StaticContent;
+  "responseFormat"?: model.TextResponseFormat | model.JsonObjectResponseFormat | model.JsonSchemaResponseFormat;
   "toolChoice"?: model.ToolChoiceFunction | model.ToolChoiceNone | model.ToolChoiceAuto | model.ToolChoiceRequired;
+  /**
+   * Whether to enable parallel function calling during tool use.
+   */
+  "isParallelToolCalls"?: boolean;
   /**
    * A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
    */
@@ -107,6 +135,17 @@ export interface GenericChatRequest extends model.BaseChatRequest {
   "apiFormat": string;
 }
 export declare namespace GenericChatRequest {
+  enum ReasoningEffort {
+    Minimal = "MINIMAL",
+    Low = "LOW",
+    Medium = "MEDIUM",
+    High = "HIGH"
+  }
+  enum Verbosity {
+    Low = "LOW",
+    Medium = "MEDIUM",
+    High = "HIGH"
+  }
   function getJsonObj(obj: GenericChatRequest, isParentJsonObj?: boolean): object;
   const apiFormat = "GENERIC";
   function getDeserializedJsonObj(obj: GenericChatRequest, isParentJsonObj?: boolean): object;
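Pulling the new request knobs together, a hedged sketch of a GenericChatRequest that exercises them. It is a plain object matching the declarations above; whether a given hosted model honors reasoningEffort or verbosity is model-dependent:

import * as genai from "oci-generativeaiinference";

const request: genai.models.GenericChatRequest = {
  apiFormat: genai.models.GenericChatRequest.apiFormat, // "GENERIC"
  messages: [
    {
      role: "USER",
      content: [{ type: "TEXT", text: "One-line status summary, please." } as genai.models.TextContent]
    } as genai.models.UserMessage
  ],
  reasoningEffort: genai.models.GenericChatRequest.ReasoningEffort.Low,
  verbosity: genai.models.GenericChatRequest.Verbosity.Medium,
  maxCompletionTokens: 2048, // bounds visible output plus reasoning tokens
  isParallelToolCalls: false,
  metadata: { team: "docs" } // up to 16 pairs; 64-char keys, 512-char values
};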
package/lib/model/generic-chat-request.js
CHANGED
@@ -42,6 +42,19 @@ exports.GenericChatRequest = void 0;
 const model = __importStar(require("../model"));
 var GenericChatRequest;
 (function (GenericChatRequest) {
+    let ReasoningEffort;
+    (function (ReasoningEffort) {
+        ReasoningEffort["Minimal"] = "MINIMAL";
+        ReasoningEffort["Low"] = "LOW";
+        ReasoningEffort["Medium"] = "MEDIUM";
+        ReasoningEffort["High"] = "HIGH";
+    })(ReasoningEffort = GenericChatRequest.ReasoningEffort || (GenericChatRequest.ReasoningEffort = {}));
+    let Verbosity;
+    (function (Verbosity) {
+        Verbosity["Low"] = "LOW";
+        Verbosity["Medium"] = "MEDIUM";
+        Verbosity["High"] = "HIGH";
+    })(Verbosity = GenericChatRequest.Verbosity || (GenericChatRequest.Verbosity = {}));
     function getJsonObj(obj, isParentJsonObj) {
         const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.BaseChatRequest.getJsonObj(obj))), {
             "messages": obj.messages
@@ -49,6 +62,13 @@ var GenericChatRequest;
                 return model.Message.getJsonObj(item);
             })
             : undefined,
+        "streamOptions": obj.streamOptions
+            ? model.StreamOptions.getJsonObj(obj.streamOptions)
+            : undefined,
+        "prediction": obj.prediction ? model.Prediction.getJsonObj(obj.prediction) : undefined,
+        "responseFormat": obj.responseFormat
+            ? model.ResponseFormat.getJsonObj(obj.responseFormat)
+            : undefined,
         "toolChoice": obj.toolChoice ? model.ToolChoice.getJsonObj(obj.toolChoice) : undefined,
         "tools": obj.tools
             ? obj.tools.map(item => {
@@ -69,6 +89,15 @@ var GenericChatRequest;
                 return model.Message.getDeserializedJsonObj(item);
             })
             : undefined,
+        "streamOptions": obj.streamOptions
+            ? model.StreamOptions.getDeserializedJsonObj(obj.streamOptions)
+            : undefined,
+        "prediction": obj.prediction
+            ? model.Prediction.getDeserializedJsonObj(obj.prediction)
+            : undefined,
+        "responseFormat": obj.responseFormat
+            ? model.ResponseFormat.getDeserializedJsonObj(obj.responseFormat)
+            : undefined,
         "toolChoice": obj.toolChoice
             ? model.ToolChoice.getDeserializedJsonObj(obj.toolChoice)
             : undefined,
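The compiled serializers above delegate the new polymorphic fields to base-model helpers that dispatch on each union's discriminator; a hedged sketch of that round trip, assuming ResponseFormat.getJsonObj is callable directly the way the serializer calls it internally:

import * as genai from "oci-generativeaiinference";

// ResponseFormat.getJsonObj routes on "type" (TEXT, JSON_OBJECT or JSON_SCHEMA)
// to the matching subtype serializer, mirroring Message/ToolChoice handling.
const wire = genai.models.ResponseFormat.getJsonObj(
  { type: "JSON_OBJECT" } as genai.models.JsonObjectResponseFormat
);
console.log(JSON.stringify(wire)); // expected: {"type":"JSON_OBJECT"}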
package/lib/model/generic-chat-request.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/generic-chat-response.d.ts
CHANGED
@@ -30,6 +30,7 @@ export interface GenericChatResponse extends model.BaseChatResponse {
    * A list of generated texts. Can be more than one if n is greater than 1.
    */
   "choices": Array<model.ChatChoice>;
+  "usage"?: model.Usage;
   "apiFormat": string;
 }
 export declare namespace GenericChatResponse {
package/lib/model/generic-chat-response.js
CHANGED
@@ -48,7 +48,8 @@ var GenericChatResponse;
             ? obj.choices.map(item => {
                 return model.ChatChoice.getJsonObj(item);
             })
-            : undefined
+            : undefined,
+        "usage": obj.usage ? model.Usage.getJsonObj(obj.usage) : undefined
         });
         return jsonObj;
     }
@@ -62,7 +63,8 @@ var GenericChatResponse;
             ? obj.choices.map(item => {
                 return model.ChatChoice.getDeserializedJsonObj(item);
             })
-            : undefined
+            : undefined,
+        "usage": obj.usage ? model.Usage.getDeserializedJsonObj(obj.usage) : undefined
         });
         return jsonObj;
     }
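A hedged sketch of reading the new aggregate usage off a response; the promptTokens/completionTokens field names are assumptions based on the OpenAI-style Usage model this API mirrors:

import * as genai from "oci-generativeaiinference";

function logUsage(response: genai.models.GenericChatResponse): void {
  // Aggregate usage for the whole response; each ChatChoice may carry its own too.
  if (response.usage) {
    console.log(`prompt=${response.usage.promptTokens}, completion=${response.usage.completionTokens}`);
  }
}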
package/lib/model/generic-chat-response.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/index.d.ts
CHANGED
@@ -91,6 +91,8 @@ import * as PersonallyIdentifiableInformationConfiguration from "./personally-id
 export import PersonallyIdentifiableInformationConfiguration = PersonallyIdentifiableInformationConfiguration.PersonallyIdentifiableInformationConfiguration;
 import * as PersonallyIdentifiableInformationResult from "./personally-identifiable-information-result";
 export import PersonallyIdentifiableInformationResult = PersonallyIdentifiableInformationResult.PersonallyIdentifiableInformationResult;
+import * as Prediction from "./prediction";
+export import Prediction = Prediction.Prediction;
 import * as PromptInjectionConfiguration from "./prompt-injection-configuration";
 export import PromptInjectionConfiguration = PromptInjectionConfiguration.PromptInjectionConfiguration;
 import * as PromptInjectionProtectionResult from "./prompt-injection-protection-result";
@@ -101,6 +103,10 @@ import * as RerankTextDetails from "./rerank-text-details";
 export import RerankTextDetails = RerankTextDetails.RerankTextDetails;
 import * as RerankTextResult from "./rerank-text-result";
 export import RerankTextResult = RerankTextResult.RerankTextResult;
+import * as ResponseFormat from "./response-format";
+export import ResponseFormat = ResponseFormat.ResponseFormat;
+import * as ResponseJsonSchema from "./response-json-schema";
+export import ResponseJsonSchema = ResponseJsonSchema.ResponseJsonSchema;
 import * as SearchQuery from "./search-query";
 export import SearchQuery = SearchQuery.SearchQuery;
 import * as ServingMode from "./serving-mode";
@@ -145,6 +151,8 @@ import * as CohereUserMessage from "./cohere-user-message";
 export import CohereUserMessage = CohereUserMessage.CohereUserMessage;
 import * as DedicatedServingMode from "./dedicated-serving-mode";
 export import DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
+import * as DeveloperMessage from "./developer-message";
+export import DeveloperMessage = DeveloperMessage.DeveloperMessage;
 import * as FunctionCall from "./function-call";
 export import FunctionCall = FunctionCall.FunctionCall;
 import * as FunctionDefinition from "./function-definition";
@@ -157,16 +165,24 @@ import * as GuardrailsTextInput from "./guardrails-text-input";
 export import GuardrailsTextInput = GuardrailsTextInput.GuardrailsTextInput;
 import * as ImageContent from "./image-content";
 export import ImageContent = ImageContent.ImageContent;
+import * as JsonObjectResponseFormat from "./json-object-response-format";
+export import JsonObjectResponseFormat = JsonObjectResponseFormat.JsonObjectResponseFormat;
+import * as JsonSchemaResponseFormat from "./json-schema-response-format";
+export import JsonSchemaResponseFormat = JsonSchemaResponseFormat.JsonSchemaResponseFormat;
 import * as LlamaLlmInferenceRequest from "./llama-llm-inference-request";
 export import LlamaLlmInferenceRequest = LlamaLlmInferenceRequest.LlamaLlmInferenceRequest;
 import * as LlamaLlmInferenceResponse from "./llama-llm-inference-response";
 export import LlamaLlmInferenceResponse = LlamaLlmInferenceResponse.LlamaLlmInferenceResponse;
 import * as OnDemandServingMode from "./on-demand-serving-mode";
 export import OnDemandServingMode = OnDemandServingMode.OnDemandServingMode;
+import * as StaticContent from "./static-content";
+export import StaticContent = StaticContent.StaticContent;
 import * as SystemMessage from "./system-message";
 export import SystemMessage = SystemMessage.SystemMessage;
 import * as TextContent from "./text-content";
 export import TextContent = TextContent.TextContent;
+import * as TextResponseFormat from "./text-response-format";
+export import TextResponseFormat = TextResponseFormat.TextResponseFormat;
 import * as ToolChoiceAuto from "./tool-choice-auto";
 export import ToolChoiceAuto = ToolChoiceAuto.ToolChoiceAuto;
 import * as ToolChoiceFunction from "./tool-choice-function";
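Since the barrel re-exports the new namespaces alongside their types, the added discriminator constants are reachable from the package root (assuming the SDK's usual models export); a quick sketch:

import * as genai from "oci-generativeaiinference";

// Constants re-exported through the barrel; the values come from the diff itself.
console.log(genai.models.DeveloperMessage.role);         // "DEVELOPER"
console.log(genai.models.JsonObjectResponseFormat.type); // "JSON_OBJECT"
console.log(genai.models.GenericChatRequest.apiFormat);  // "GENERIC"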
package/lib/model/index.js
CHANGED
@@ -38,8 +38,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.UserMessage = exports.ToolMessage = exports.ToolChoiceRequired = exports.ToolChoiceNone = exports.ToolChoiceFunction = exports.ToolChoiceAuto = exports.TextContent = exports.SystemMessage = exports.OnDemandServingMode = exports.LlamaLlmInferenceResponse = exports.LlamaLlmInferenceRequest = exports.ImageContent = exports.GuardrailsTextInput = exports.GenericChatResponse = exports.GenericChatRequest = exports.FunctionDefinition = exports.FunctionCall = exports.DedicatedServingMode = exports.CohereUserMessage = exports.CohereToolMessage = exports.CohereSystemMessage = exports.CohereResponseTextFormat = exports.CohereResponseJsonFormat = exports.CohereLlmInferenceResponse = exports.CohereLlmInferenceRequest = exports.CohereChatResponse = exports.CohereChatRequest = exports.CohereChatBotMessage = exports.AssistantMessage = exports.Usage = exports.ToolDefinition = void 0;
+exports.SummarizeTextResult = exports.SummarizeTextDetails = exports.StreamOptions = exports.ServingMode = exports.SearchQuery = exports.ResponseJsonSchema = exports.ResponseFormat = exports.RerankTextResult = exports.RerankTextDetails = exports.PromptTokensDetails = exports.PromptInjectionProtectionResult = exports.PromptInjectionConfiguration = exports.Prediction = exports.PersonallyIdentifiableInformationResult = exports.PersonallyIdentifiableInformationConfiguration = exports.Message = exports.Logprobs = exports.LlmInferenceResponse = exports.LlmInferenceRequest = exports.ImageUrl = exports.GuardrailsResults = exports.GuardrailsInput = exports.GuardrailConfigs = exports.GeneratedText = exports.GenerateTextResult = exports.GenerateTextDetails = exports.EmbedTextResult = exports.EmbedTextDetails = exports.DocumentRank = exports.Document = exports.ContentModerationResult = exports.ContentModerationConfiguration = exports.CompletionTokensDetails = exports.CohereToolResult = exports.CohereToolCall = exports.CohereTool = exports.CohereResponseFormat = exports.CohereParameterDefinition = exports.CohereMessage = exports.Citation = exports.Choice = exports.ChatResult = exports.ChatDetails = exports.ChatContent = exports.ChatChoice = exports.CategoryScore = exports.BaseChatResponse = exports.BaseChatRequest = exports.ApplyGuardrailsResult = exports.ApplyGuardrailsDetails = void 0;
+exports.UserMessage = exports.ToolMessage = exports.ToolChoiceRequired = exports.ToolChoiceNone = exports.ToolChoiceFunction = exports.ToolChoiceAuto = exports.TextResponseFormat = exports.TextContent = exports.SystemMessage = exports.StaticContent = exports.OnDemandServingMode = exports.LlamaLlmInferenceResponse = exports.LlamaLlmInferenceRequest = exports.JsonSchemaResponseFormat = exports.JsonObjectResponseFormat = exports.ImageContent = exports.GuardrailsTextInput = exports.GenericChatResponse = exports.GenericChatRequest = exports.FunctionDefinition = exports.FunctionCall = exports.DeveloperMessage = exports.DedicatedServingMode = exports.CohereUserMessage = exports.CohereToolMessage = exports.CohereSystemMessage = exports.CohereResponseTextFormat = exports.CohereResponseJsonFormat = exports.CohereLlmInferenceResponse = exports.CohereLlmInferenceRequest = exports.CohereChatResponse = exports.CohereChatRequest = exports.CohereChatBotMessage = exports.AssistantMessage = exports.Usage = exports.ToolDefinition = exports.ToolChoice = exports.ToolCall = exports.TokenLikelihood = void 0;
 const ApplyGuardrailsDetails = __importStar(require("./apply-guardrails-details"));
 exports.ApplyGuardrailsDetails = ApplyGuardrailsDetails.ApplyGuardrailsDetails;
 const ApplyGuardrailsResult = __importStar(require("./apply-guardrails-result"));
@@ -114,6 +114,8 @@ const PersonallyIdentifiableInformationConfiguration = __importStar(require("./p
 exports.PersonallyIdentifiableInformationConfiguration = PersonallyIdentifiableInformationConfiguration.PersonallyIdentifiableInformationConfiguration;
 const PersonallyIdentifiableInformationResult = __importStar(require("./personally-identifiable-information-result"));
 exports.PersonallyIdentifiableInformationResult = PersonallyIdentifiableInformationResult.PersonallyIdentifiableInformationResult;
+const Prediction = __importStar(require("./prediction"));
+exports.Prediction = Prediction.Prediction;
 const PromptInjectionConfiguration = __importStar(require("./prompt-injection-configuration"));
 exports.PromptInjectionConfiguration = PromptInjectionConfiguration.PromptInjectionConfiguration;
 const PromptInjectionProtectionResult = __importStar(require("./prompt-injection-protection-result"));
@@ -124,6 +126,10 @@ const RerankTextDetails = __importStar(require("./rerank-text-details"));
 exports.RerankTextDetails = RerankTextDetails.RerankTextDetails;
 const RerankTextResult = __importStar(require("./rerank-text-result"));
 exports.RerankTextResult = RerankTextResult.RerankTextResult;
+const ResponseFormat = __importStar(require("./response-format"));
+exports.ResponseFormat = ResponseFormat.ResponseFormat;
+const ResponseJsonSchema = __importStar(require("./response-json-schema"));
+exports.ResponseJsonSchema = ResponseJsonSchema.ResponseJsonSchema;
 const SearchQuery = __importStar(require("./search-query"));
 exports.SearchQuery = SearchQuery.SearchQuery;
 const ServingMode = __importStar(require("./serving-mode"));
@@ -168,6 +174,8 @@ const CohereUserMessage = __importStar(require("./cohere-user-message"));
 exports.CohereUserMessage = CohereUserMessage.CohereUserMessage;
 const DedicatedServingMode = __importStar(require("./dedicated-serving-mode"));
 exports.DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
+const DeveloperMessage = __importStar(require("./developer-message"));
+exports.DeveloperMessage = DeveloperMessage.DeveloperMessage;
 const FunctionCall = __importStar(require("./function-call"));
 exports.FunctionCall = FunctionCall.FunctionCall;
 const FunctionDefinition = __importStar(require("./function-definition"));
@@ -180,16 +188,24 @@ const GuardrailsTextInput = __importStar(require("./guardrails-text-input"));
 exports.GuardrailsTextInput = GuardrailsTextInput.GuardrailsTextInput;
 const ImageContent = __importStar(require("./image-content"));
 exports.ImageContent = ImageContent.ImageContent;
+const JsonObjectResponseFormat = __importStar(require("./json-object-response-format"));
+exports.JsonObjectResponseFormat = JsonObjectResponseFormat.JsonObjectResponseFormat;
+const JsonSchemaResponseFormat = __importStar(require("./json-schema-response-format"));
+exports.JsonSchemaResponseFormat = JsonSchemaResponseFormat.JsonSchemaResponseFormat;
 const LlamaLlmInferenceRequest = __importStar(require("./llama-llm-inference-request"));
 exports.LlamaLlmInferenceRequest = LlamaLlmInferenceRequest.LlamaLlmInferenceRequest;
 const LlamaLlmInferenceResponse = __importStar(require("./llama-llm-inference-response"));
 exports.LlamaLlmInferenceResponse = LlamaLlmInferenceResponse.LlamaLlmInferenceResponse;
 const OnDemandServingMode = __importStar(require("./on-demand-serving-mode"));
 exports.OnDemandServingMode = OnDemandServingMode.OnDemandServingMode;
+const StaticContent = __importStar(require("./static-content"));
+exports.StaticContent = StaticContent.StaticContent;
 const SystemMessage = __importStar(require("./system-message"));
 exports.SystemMessage = SystemMessage.SystemMessage;
 const TextContent = __importStar(require("./text-content"));
 exports.TextContent = TextContent.TextContent;
+const TextResponseFormat = __importStar(require("./text-response-format"));
+exports.TextResponseFormat = TextResponseFormat.TextResponseFormat;
 const ToolChoiceAuto = __importStar(require("./tool-choice-auto"));
 exports.ToolChoiceAuto = ToolChoiceAuto.ToolChoiceAuto;
 const ToolChoiceFunction = __importStar(require("./tool-choice-function"));
package/lib/model/index.js.map
CHANGED
@@ -1 +1 @@
(regenerated source map; only the minified "mappings" payload changed)
package/lib/model/json-object-response-format.d.ts
ADDED
@@ -0,0 +1,31 @@
+/* … standard auto-generated service header, identical to developer-message.d.ts … */
+import * as model from "../model";
+/**
+ * Enables JSON mode, which ensures the message the model generates is valid JSON.
+ */
+export interface JsonObjectResponseFormat extends model.ResponseFormat {
+  "type": string;
+}
+export declare namespace JsonObjectResponseFormat {
+  function getJsonObj(obj: JsonObjectResponseFormat, isParentJsonObj?: boolean): object;
+  const type = "JSON_OBJECT";
+  function getDeserializedJsonObj(obj: JsonObjectResponseFormat, isParentJsonObj?: boolean): object;
+}