oci-generativeaiinference 2.87.0 → 2.88.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +1 -1
- package/index.js +1 -1
- package/lib/client.d.ts +5 -5
- package/lib/client.js +5 -5
- package/lib/model/assistant-message.d.ts +35 -0
- package/lib/model/assistant-message.js +57 -0
- package/lib/model/assistant-message.js.map +1 -0
- package/lib/model/base-chat-request.d.ts +2 -2
- package/lib/model/base-chat-request.js +1 -1
- package/lib/model/base-chat-response.d.ts +2 -2
- package/lib/model/base-chat-response.js +1 -1
- package/lib/model/chat-choice.d.ts +2 -2
- package/lib/model/chat-choice.js +1 -1
- package/lib/model/chat-content.d.ts +1 -1
- package/lib/model/chat-content.js +1 -1
- package/lib/model/chat-details.d.ts +3 -3
- package/lib/model/chat-details.js +1 -1
- package/lib/model/chat-result.d.ts +2 -2
- package/lib/model/chat-result.js +1 -1
- package/lib/model/choice.d.ts +2 -2
- package/lib/model/choice.js +1 -1
- package/lib/model/citation.d.ts +6 -6
- package/lib/model/citation.js +1 -1
- package/lib/model/cohere-chat-bot-message.d.ts +39 -0
- package/lib/model/cohere-chat-bot-message.js +71 -0
- package/lib/model/cohere-chat-bot-message.js.map +1 -0
- package/lib/model/cohere-chat-request.d.ts +74 -18
- package/lib/model/cohere-chat-request.js +31 -1
- package/lib/model/cohere-chat-request.js.map +1 -1
- package/lib/model/cohere-chat-response.d.ts +39 -10
- package/lib/model/cohere-chat-response.js +21 -1
- package/lib/model/cohere-chat-response.js.map +1 -1
- package/lib/model/cohere-llm-inference-request.d.ts +1 -1
- package/lib/model/cohere-llm-inference-request.js +1 -1
- package/lib/model/cohere-llm-inference-response.d.ts +1 -1
- package/lib/model/cohere-llm-inference-response.js +1 -1
- package/lib/model/cohere-message.d.ts +3 -14
- package/lib/model/cohere-message.js +52 -6
- package/lib/model/cohere-message.js.map +1 -1
- package/lib/model/cohere-parameter-definition.d.ts +40 -0
- package/lib/model/cohere-parameter-definition.js +36 -0
- package/lib/model/cohere-parameter-definition.js.map +1 -0
- package/lib/model/cohere-system-message.d.ts +35 -0
- package/lib/model/cohere-system-message.js +59 -0
- package/lib/model/cohere-system-message.js.map +1 -0
- package/lib/model/cohere-tool-call.d.ts +36 -0
- package/lib/model/cohere-tool-call.js +36 -0
- package/lib/model/cohere-tool-call.js.map +1 -0
- package/lib/model/cohere-tool-message.d.ts +35 -0
- package/lib/model/cohere-tool-message.js +71 -0
- package/lib/model/cohere-tool-message.js.map +1 -0
- package/lib/model/cohere-tool-result.d.ts +34 -0
- package/lib/model/cohere-tool-result.js +60 -0
- package/lib/model/cohere-tool-result.js.map +1 -0
- package/lib/model/cohere-tool.d.ts +43 -0
- package/lib/model/cohere-tool.js +65 -0
- package/lib/model/cohere-tool.js.map +1 -0
- package/lib/model/cohere-user-message.d.ts +35 -0
- package/lib/model/cohere-user-message.js +59 -0
- package/lib/model/cohere-user-message.js.map +1 -0
- package/lib/model/dedicated-serving-mode.d.ts +1 -1
- package/lib/model/dedicated-serving-mode.js +1 -1
- package/lib/model/embed-text-details.d.ts +3 -3
- package/lib/model/embed-text-details.js +1 -1
- package/lib/model/embed-text-result.d.ts +1 -1
- package/lib/model/embed-text-result.js +1 -1
- package/lib/model/generate-text-details.d.ts +2 -2
- package/lib/model/generate-text-details.js +1 -1
- package/lib/model/generate-text-result.d.ts +1 -1
- package/lib/model/generate-text-result.js +1 -1
- package/lib/model/generated-text.d.ts +1 -1
- package/lib/model/generated-text.js +1 -1
- package/lib/model/generic-chat-request.d.ts +12 -7
- package/lib/model/generic-chat-request.js +1 -1
- package/lib/model/generic-chat-request.js.map +1 -1
- package/lib/model/generic-chat-response.d.ts +3 -3
- package/lib/model/generic-chat-response.js +1 -1
- package/lib/model/index.d.ts +23 -1
- package/lib/model/index.js +24 -2
- package/lib/model/index.js.map +1 -1
- package/lib/model/llama-llm-inference-request.d.ts +2 -2
- package/lib/model/llama-llm-inference-request.js +1 -1
- package/lib/model/llama-llm-inference-response.d.ts +1 -1
- package/lib/model/llama-llm-inference-response.js +1 -1
- package/lib/model/llm-inference-request.d.ts +1 -1
- package/lib/model/llm-inference-request.js +1 -1
- package/lib/model/llm-inference-response.d.ts +1 -1
- package/lib/model/llm-inference-response.js +1 -1
- package/lib/model/logprobs.d.ts +6 -3
- package/lib/model/logprobs.js +1 -1
- package/lib/model/logprobs.js.map +1 -1
- package/lib/model/message.d.ts +4 -7
- package/lib/model/message.js +28 -1
- package/lib/model/message.js.map +1 -1
- package/lib/model/on-demand-serving-mode.d.ts +2 -2
- package/lib/model/on-demand-serving-mode.js +1 -1
- package/lib/model/search-query.d.ts +1 -1
- package/lib/model/search-query.js +1 -1
- package/lib/model/serving-mode.d.ts +2 -2
- package/lib/model/serving-mode.js +1 -1
- package/lib/model/summarize-text-details.d.ts +2 -2
- package/lib/model/summarize-text-details.js +1 -1
- package/lib/model/summarize-text-result.d.ts +1 -1
- package/lib/model/summarize-text-result.js +1 -1
- package/lib/model/system-message.d.ts +35 -0
- package/lib/model/system-message.js +57 -0
- package/lib/model/system-message.js.map +1 -0
- package/lib/model/text-content.d.ts +2 -2
- package/lib/model/text-content.js +1 -1
- package/lib/model/token-likelihood.d.ts +1 -1
- package/lib/model/token-likelihood.js +1 -1
- package/lib/model/user-message.d.ts +35 -0
- package/lib/model/user-message.js +57 -0
- package/lib/model/user-message.js.map +1 -0
- package/lib/request/chat-request.d.ts +3 -4
- package/lib/request/embed-text-request.d.ts +3 -4
- package/lib/request/generate-text-request.d.ts +3 -4
- package/lib/request/index.d.ts +1 -1
- package/lib/request/index.js +1 -1
- package/lib/request/summarize-text-request.d.ts +3 -4
- package/lib/response/index.d.ts +1 -1
- package/lib/response/index.js +1 -1
- package/package.json +3 -3
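
Functionally, the headline change in 2.88.x is Cohere chat and tool-use support: CohereMessage becomes a role-discriminated base type with new CHATBOT/SYSTEM/TOOL/USER subtypes, and new CohereTool, CohereToolCall, CohereToolResult, and CohereParameterDefinition models are added. As a rough orientation before the raw diff, a minimal sketch of a chat history built from the new role-specific messages might look like this (the `models` re-export from the package root is assumed, and the CohereUserMessage/CohereChatBotMessage shapes come from new files listed above but not reproduced in this excerpt):

```typescript
import { models } from "oci-generativeaiinference"; // assumed root re-export of lib/model

// Each turn in a Cohere chat history is now one of the role-specific CohereMessage subtypes.
const chatHistory: Array<models.CohereMessage> = [
  { role: "SYSTEM", message: "You are a terse assistant." } as models.CohereSystemMessage,
  { role: "USER", message: "What's the weather in Chicago?" } as models.CohereUserMessage,
  { role: "CHATBOT", message: "Let me look that up." } as models.CohereChatBotMessage
];
```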

@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
 
-Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
 
 To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).
 

package/lib/model/cohere-message.js
@@ -18,22 +18,68 @@ To learn more about the service, see the [Generative AI documentation](/iaas/Con
  * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
  * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
  */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.CohereMessage = void 0;
+const model = __importStar(require("../model"));
+const common = require("oci-common");
 var CohereMessage;
 (function (CohereMessage) {
-  let Role;
-  (function (Role) {
-    Role["Chatbot"] = "CHATBOT";
-    Role["User"] = "USER";
-  })(Role = CohereMessage.Role || (CohereMessage.Role = {}));
   function getJsonObj(obj) {
     const jsonObj = Object.assign(Object.assign({}, obj), {});
+    if (obj && "role" in obj && obj.role) {
+      switch (obj.role) {
+        case "CHATBOT":
+          return model.CohereChatBotMessage.getJsonObj(jsonObj, true);
+        case "SYSTEM":
+          return model.CohereSystemMessage.getJsonObj(jsonObj, true);
+        case "TOOL":
+          return model.CohereToolMessage.getJsonObj(jsonObj, true);
+        case "USER":
+          return model.CohereUserMessage.getJsonObj(jsonObj, true);
+        default:
+          if (common.LOG.logger)
+            common.LOG.logger.info(`Unknown value for: ${obj.role}`);
+      }
+    }
     return jsonObj;
   }
   CohereMessage.getJsonObj = getJsonObj;
   function getDeserializedJsonObj(obj) {
     const jsonObj = Object.assign(Object.assign({}, obj), {});
+    if (obj && "role" in obj && obj.role) {
+      switch (obj.role) {
+        case "CHATBOT":
+          return model.CohereChatBotMessage.getDeserializedJsonObj(jsonObj, true);
+        case "SYSTEM":
+          return model.CohereSystemMessage.getDeserializedJsonObj(jsonObj, true);
+        case "TOOL":
+          return model.CohereToolMessage.getDeserializedJsonObj(jsonObj, true);
+        case "USER":
+          return model.CohereUserMessage.getDeserializedJsonObj(jsonObj, true);
+        default:
+          if (common.LOG.logger)
+            common.LOG.logger.info(`Unknown value for: ${obj.role}`);
+      }
+    }
     return jsonObj;
   }
   CohereMessage.getDeserializedJsonObj = getDeserializedJsonObj;
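
The hunk above removes the old two-value `CohereMessage.Role` enum (CHATBOT/USER) and turns `getJsonObj`/`getDeserializedJsonObj` into role-based dispatchers: they inspect `obj.role`, delegate to the matching subtype's serializer, and merely log unknown roles instead of failing. A hedged sketch of the observable effect (the deep `lib/model` import path is shown the way the compiled files use it; the public `models` namespace is equivalent):

```typescript
import * as model from "oci-generativeaiinference/lib/model";

// role === "SYSTEM" routes through the switch added above into CohereSystemMessage.getJsonObj(jsonObj, true).
const sys = { role: "SYSTEM", message: "Be brief." } as model.CohereSystemMessage;
const wire = model.CohereMessage.getJsonObj(sys);

// An unrecognized role is logged (if oci-common has a logger configured) and serialized as a shallow copy.
const passthrough = model.CohereMessage.getJsonObj({ role: "SOMETHING_ELSE" } as any);
```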

package/lib/model/cohere-message.js.map
@@ -1 +1 @@
-
{"version":3,"file":"cohere-message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG
+
{"version":3,"file":"cohere-message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AAStC,IAAiB,aAAa,CA+D7B;AA/DD,WAAiB,aAAa;IAC5B,SAAgB,UAAU,CAAC,GAAkB;QAC3C,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,SAAS;oBACZ,OAAO,KAAK,CAAC,oBAAoB,CAAC,UAAU,CACL,OAAQ,EAC7C,IAAI,CACL,CAAC;gBACJ,KAAK,QAAQ;oBACX,OAAO,KAAK,CAAC,mBAAmB,CAAC,UAAU,CACL,OAAQ,EAC5C,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,iBAAiB,CAAC,UAAU,CACL,OAAQ,EAC1C,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,iBAAiB,CAAC,UAAU,CACL,OAAQ,EAC1C,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IA9Be,wBAAU,aA8BzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAkB;QACvD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,SAAS;oBACZ,OAAO,KAAK,CAAC,oBAAoB,CAAC,sBAAsB,CACjB,OAAQ,EAC7C,IAAI,CACL,CAAC;gBACJ,KAAK,QAAQ;oBACX,OAAO,KAAK,CAAC,mBAAmB,CAAC,sBAAsB,CACjB,OAAQ,EAC5C,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,iBAAiB,CAAC,sBAAsB,CACjB,OAAQ,EAC1C,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,iBAAiB,CAAC,sBAAsB,CACjB,OAAQ,EAC1C,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IA9Be,oCAAsB,yBA8BrC,CAAA;AACH,CAAC,EA/DgB,aAAa,GAAb,qBAAa,KAAb,qBAAa,QA+D7B"}

package/lib/model/cohere-parameter-definition.d.ts
@@ -0,0 +1,40 @@
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+/**
+ * A definition of tool parameter.
+ */
+export interface CohereParameterDefinition {
+  /**
+   * The description of the parameter.
+   */
+  "description"?: string;
+  /**
+   * The type of the parameter. Must be a valid Python type.
+   */
+  "type": string;
+  /**
+   * Denotes whether the parameter is always present (required) or not. Defaults to not required.
+   */
+  "isRequired"?: boolean;
+}
+export declare namespace CohereParameterDefinition {
+  function getJsonObj(obj: CohereParameterDefinition): object;
+  function getDeserializedJsonObj(obj: CohereParameterDefinition): object;
+}
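
CohereParameterDefinition is a plain data shape (its serializers in the next file are pass-throughs); it is consumed as a record keyed by parameter name in `CohereTool.parameterDefinitions`, defined at the end of this diff. An illustrative record, with the parameter names invented for the example:

```typescript
import * as model from "oci-generativeaiinference/lib/model";

// Keys are the parameter names; "type" must be a valid Python type per the doc comment above.
const parameterDefinitions: { [key: string]: model.CohereParameterDefinition } = {
  city: { type: "str", description: "City to look up", isRequired: true },
  unit: { type: "str", description: "Temperature unit, e.g. celsius or fahrenheit" }
};
```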

package/lib/model/cohere-parameter-definition.js
@@ -0,0 +1,36 @@
+"use strict";
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CohereParameterDefinition = void 0;
+var CohereParameterDefinition;
+(function (CohereParameterDefinition) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  CohereParameterDefinition.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  CohereParameterDefinition.getDeserializedJsonObj = getDeserializedJsonObj;
+})(CohereParameterDefinition = exports.CohereParameterDefinition || (exports.CohereParameterDefinition = {}));
+//# sourceMappingURL=cohere-parameter-definition.js.map

package/lib/model/cohere-parameter-definition.js.map
@@ -0,0 +1 @@
+
{"version":3,"file":"cohere-parameter-definition.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-parameter-definition.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;AAuBH,IAAiB,yBAAyB,CAWzC;AAXD,WAAiB,yBAAyB;IACxC,SAAgB,UAAU,CAAC,GAA8B;QACvD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,oCAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAA8B;QACnE,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,gDAAsB,yBAIrC,CAAA;AACH,CAAC,EAXgB,yBAAyB,GAAzB,iCAAyB,KAAzB,iCAAyB,QAWzC"}

package/lib/model/cohere-system-message.d.ts
@@ -0,0 +1,35 @@
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+import * as model from "../model";
+/**
+ * A message that represents a single chat dialog as SYSTEM role.
+ */
+export interface CohereSystemMessage extends model.CohereMessage {
+  /**
+   * Contents of the chat message.
+   */
+  "message": string;
+  "role": string;
+}
+export declare namespace CohereSystemMessage {
+  function getJsonObj(obj: CohereSystemMessage, isParentJsonObj?: boolean): object;
+  const role = "SYSTEM";
+  function getDeserializedJsonObj(obj: CohereSystemMessage, isParentJsonObj?: boolean): object;
+}

package/lib/model/cohere-system-message.js
@@ -0,0 +1,59 @@
+"use strict";
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+… TypeScript import helpers (__createBinding, __setModuleDefault, __importStar), identical to the 19 lines added to cohere-message.js above …
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CohereSystemMessage = void 0;
+const model = __importStar(require("../model"));
+var CohereSystemMessage;
+(function (CohereSystemMessage) {
+  function getJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.CohereMessage.getJsonObj(obj))), {});
+    return jsonObj;
+  }
+  CohereSystemMessage.getJsonObj = getJsonObj;
+  CohereSystemMessage.role = "SYSTEM";
+  function getDeserializedJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.CohereMessage.getDeserializedJsonObj(obj))), {});
+    return jsonObj;
+  }
+  CohereSystemMessage.getDeserializedJsonObj = getDeserializedJsonObj;
+})(CohereSystemMessage = exports.CohereSystemMessage || (exports.CohereSystemMessage = {}));
+//# sourceMappingURL=cohere-system-message.js.map
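
As with the other role subtypes, `CohereSystemMessage.getJsonObj` takes an `isParentJsonObj` flag: the `CohereMessage` dispatcher passes `true` once it has already built the JSON object, so the subtype does not route back through the dispatcher again. A small sketch of the two call paths:

```typescript
import * as model from "oci-generativeaiinference/lib/model";

const sys: model.CohereSystemMessage = { role: "SYSTEM", message: "Answer in one sentence." };

// Called directly: falls through to CohereMessage.getJsonObj, which dispatches on role
// and re-enters CohereSystemMessage.getJsonObj with isParentJsonObj = true.
const a = model.CohereSystemMessage.getJsonObj(sys);

// Called the way the dispatcher calls it: the already-built object is used as-is.
const b = model.CohereSystemMessage.getJsonObj(sys, true);
```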

package/lib/model/cohere-system-message.js.map
@@ -0,0 +1 @@
+
{"version":3,"file":"cohere-system-message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-system-message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAelC,IAAiB,mBAAmB,CAuBnC;AAvBD,WAAiB,mBAAmB;IAClC,SAAgB,UAAU,CAAC,GAAwB,EAAE,eAAyB;QAC5E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,UAAU,CAAC,GAAG,CAAyB,CAAC,GACtF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,8BAAU,aAOzB,CAAA;IACY,wBAAI,GAAG,QAAQ,CAAC;IAC7B,SAAgB,sBAAsB,CACpC,GAAwB,EACxB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,sBAAsB,CAAC,GAAG,CAAyB,CAAC,GAC1E,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,0CAAsB,yBAYrC,CAAA;AACH,CAAC,EAvBgB,mBAAmB,GAAnB,2BAAmB,KAAnB,2BAAmB,QAuBnC"}

package/lib/model/cohere-tool-call.d.ts
@@ -0,0 +1,36 @@
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+/**
+ * A tool call generated by the model.
+ */
+export interface CohereToolCall {
+  /**
+   * Name of the tool to call.
+   */
+  "name": string;
+  /**
+   * The parameters to use when invoking a tool.
+   */
+  "parameters": any;
+}
+export declare namespace CohereToolCall {
+  function getJsonObj(obj: CohereToolCall): object;
+  function getDeserializedJsonObj(obj: CohereToolCall): object;
+}
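
A CohereToolCall is what the model emits when it wants a tool run: a tool name plus an untyped `parameters` bag. Executing one on the application side might look like this sketch (the `get_weather` handler is hypothetical):

```typescript
import * as model from "oci-generativeaiinference/lib/model";

// Hypothetical local implementation of a tool the model may request.
async function getWeather(params: { city?: string }): Promise<object> {
  return { city: params.city, temperatureC: 21 };
}

// Map a tool call from the model onto the matching local handler.
async function runToolCall(call: model.CohereToolCall): Promise<Array<any>> {
  switch (call.name) {
    case "get_weather":
      return [await getWeather(call.parameters)];
    default:
      throw new Error(`No local handler for tool ${call.name}`);
  }
}
```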

package/lib/model/cohere-tool-call.js
@@ -0,0 +1,36 @@
+"use strict";
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CohereToolCall = void 0;
+var CohereToolCall;
+(function (CohereToolCall) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  CohereToolCall.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  CohereToolCall.getDeserializedJsonObj = getDeserializedJsonObj;
+})(CohereToolCall = exports.CohereToolCall || (exports.CohereToolCall = {}));
+//# sourceMappingURL=cohere-tool-call.js.map

package/lib/model/cohere-tool-call.js.map
@@ -0,0 +1 @@
+
{"version":3,"file":"cohere-tool-call.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-tool-call.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;AAmBH,IAAiB,cAAc,CAW9B;AAXD,WAAiB,cAAc;IAC7B,SAAgB,UAAU,CAAC,GAAmB;QAC5C,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,yBAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAmB;QACxD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,qCAAsB,yBAIrC,CAAA;AACH,CAAC,EAXgB,cAAc,GAAd,sBAAc,KAAd,sBAAc,QAW9B"}

package/lib/model/cohere-tool-message.d.ts
@@ -0,0 +1,35 @@
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+import * as model from "../model";
+/**
+ * A message that represents a single chat dialog as TOOL role.
+ */
+export interface CohereToolMessage extends model.CohereMessage {
+  /**
+   * A list of results from invoking tools recommended by the model in the previous chat turn.
+   */
+  "toolResults": Array<model.CohereToolResult>;
+  "role": string;
+}
+export declare namespace CohereToolMessage {
+  function getJsonObj(obj: CohereToolMessage, isParentJsonObj?: boolean): object;
+  const role = "TOOL";
+  function getDeserializedJsonObj(obj: CohereToolMessage, isParentJsonObj?: boolean): object;
+}
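
A TOOL-role message carries the results of those invocations back to the model: each CohereToolResult pairs the original call with whatever the tool returned (CohereToolResult itself is defined further down in this diff). A hedged sketch of building the follow-up turn; how it is attached to the next chat request lives in the updated cohere-chat-request, which is not reproduced in this excerpt:

```typescript
import * as model from "oci-generativeaiinference/lib/model";

// The call the model asked for (values are illustrative)...
const call: model.CohereToolCall = { name: "get_weather", parameters: { city: "Chicago" } };

// ...echoed back together with the tool's output as a TOOL-role chat turn.
const toolTurn: model.CohereToolMessage = {
  role: "TOOL",
  toolResults: [{ call, outputs: [{ temperatureC: 21 }] }]
};
```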

package/lib/model/cohere-tool-message.js
@@ -0,0 +1,71 @@
+"use strict";
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+… TypeScript import helpers (__createBinding, __setModuleDefault, __importStar), identical to the 19 lines added to cohere-message.js above …
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CohereToolMessage = void 0;
+const model = __importStar(require("../model"));
+var CohereToolMessage;
+(function (CohereToolMessage) {
+  function getJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.CohereMessage.getJsonObj(obj))), {
+      "toolResults": obj.toolResults
+        ? obj.toolResults.map(item => {
+            return model.CohereToolResult.getJsonObj(item);
+          })
+        : undefined
+    });
+    return jsonObj;
+  }
+  CohereToolMessage.getJsonObj = getJsonObj;
+  CohereToolMessage.role = "TOOL";
+  function getDeserializedJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.CohereMessage.getDeserializedJsonObj(obj))), {
+      "toolResults": obj.toolResults
+        ? obj.toolResults.map(item => {
+            return model.CohereToolResult.getDeserializedJsonObj(item);
+          })
+        : undefined
+    });
+    return jsonObj;
+  }
+  CohereToolMessage.getDeserializedJsonObj = getDeserializedJsonObj;
+})(CohereToolMessage = exports.CohereToolMessage || (exports.CohereToolMessage = {}));
+//# sourceMappingURL=cohere-tool-message.js.map

package/lib/model/cohere-tool-message.js.map
@@ -0,0 +1 @@
+
{"version":3,"file":"cohere-tool-message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-tool-message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAelC,IAAiB,iBAAiB,CAmCjC;AAnCD,WAAiB,iBAAiB;IAChC,SAAgB,UAAU,CAAC,GAAsB,EAAE,eAAyB;QAC1E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,UAAU,CAAC,GAAG,CAAuB,CAAC,GACpF;YACD,aAAa,EAAE,GAAG,CAAC,WAAW;gBAC5B,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACzB,OAAO,KAAK,CAAC,gBAAgB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACjD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,4BAAU,aAazB,CAAA;IACY,sBAAI,GAAG,MAAM,CAAC;IAC3B,SAAgB,sBAAsB,CACpC,GAAsB,EACtB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,sBAAsB,CAAC,GAAG,CAAuB,CAAC,GACxE;YACD,aAAa,EAAE,GAAG,CAAC,WAAW;gBAC5B,CAAC,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACzB,OAAO,KAAK,CAAC,gBAAgB,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBAC7D,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAlBe,wCAAsB,yBAkBrC,CAAA;AACH,CAAC,EAnCgB,iBAAiB,GAAjB,yBAAiB,KAAjB,yBAAiB,QAmCjC"}

package/lib/model/cohere-tool-result.d.ts
@@ -0,0 +1,34 @@
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+import * as model from "../model";
+/**
+ * The result from invoking tools recommended by the model in the previous chat turn.
+ */
+export interface CohereToolResult {
+  "call": model.CohereToolCall;
+  /**
+   * An array of objects returned by tool.
+   */
+  "outputs": Array<any>;
+}
+export declare namespace CohereToolResult {
+  function getJsonObj(obj: CohereToolResult): object;
+  function getDeserializedJsonObj(obj: CohereToolResult): object;
+}

package/lib/model/cohere-tool-result.js
@@ -0,0 +1,60 @@
+"use strict";
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+… TypeScript import helpers (__createBinding, __setModuleDefault, __importStar), identical to the 19 lines added to cohere-message.js above …
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CohereToolResult = void 0;
+const model = __importStar(require("../model"));
+var CohereToolResult;
+(function (CohereToolResult) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "call": obj.call ? model.CohereToolCall.getJsonObj(obj.call) : undefined
+    });
+    return jsonObj;
+  }
+  CohereToolResult.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "call": obj.call ? model.CohereToolCall.getDeserializedJsonObj(obj.call) : undefined
+    });
+    return jsonObj;
+  }
+  CohereToolResult.getDeserializedJsonObj = getDeserializedJsonObj;
+})(CohereToolResult = exports.CohereToolResult || (exports.CohereToolResult = {}));
+//# sourceMappingURL=cohere-tool-result.js.map

package/lib/model/cohere-tool-result.js.map
@@ -0,0 +1 @@
+
{"version":3,"file":"cohere-tool-result.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-tool-result.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAclC,IAAiB,gBAAgB,CAqBhC;AArBD,WAAiB,gBAAgB;IAC/B,SAAgB,UAAU,CAAC,GAAqB;QAC9C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,MAAM,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,SAAS;SACzE,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IATe,2BAAU,aASzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAqB;QAC1D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,MAAM,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,sBAAsB,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,SAAS;SACrF,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IATe,uCAAsB,yBASrC,CAAA;AACH,CAAC,EArBgB,gBAAgB,GAAhB,wBAAgB,KAAhB,wBAAgB,QAqBhC"}

package/lib/model/cohere-tool.d.ts
@@ -0,0 +1,43 @@
+/** … 19-line auto-generated file header comment, identical to the one in cohere-parameter-definition.d.ts above … */
+import * as model from "../model";
+/**
+ * A definition of tool (function).
+ */
+export interface CohereTool {
+  /**
+   * The name of the tool to be called. Valid names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit.
+   */
+  "name": string;
+  /**
+   * The description of what the tool does, the model uses the description to choose when and how to call the function.
+   */
+  "description": string;
+  /**
+   * The input parameters of the tool.
+   */
+  "parameterDefinitions"?: {
+    [key: string]: model.CohereParameterDefinition;
+  };
+}
+export declare namespace CohereTool {
+  function getJsonObj(obj: CohereTool): object;
+  function getDeserializedJsonObj(obj: CohereTool): object;
+}
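
CohereTool closes the loop: it describes, per tool, the name, what it does, and its parameters (as CohereParameterDefinition values keyed by parameter name), so the model knows what it may call; CohereToolCall is what it then asks for, and CohereToolResult/CohereToolMessage carry the answer back. An illustrative definition (tool name and parameters invented for the example; wiring it into the chat request is part of the updated cohere-chat-request not shown in this excerpt):

```typescript
import * as model from "oci-generativeaiinference/lib/model";

// Advertise one callable tool to the model.
const weatherTool: model.CohereTool = {
  name: "get_weather",
  description: "Returns the current weather for a city.",
  parameterDefinitions: {
    city: { type: "str", description: "City to look up", isRequired: true }
  }
};
```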