oci-generativeaiinference 2.101.0 → 2.102.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/client.d.ts +12 -0
- package/lib/client.js +66 -0
- package/lib/client.js.map +1 -1
- package/lib/model/assistant-message.d.ts +4 -0
- package/lib/model/assistant-message.js +14 -2
- package/lib/model/assistant-message.js.map +1 -1
- package/lib/model/chat-choice.d.ts +1 -1
- package/lib/model/cohere-chat-request.d.ts +1 -1
- package/lib/model/document-rank.d.ts +38 -0
- package/lib/model/document-rank.js +60 -0
- package/lib/model/document-rank.js.map +1 -0
- package/lib/model/document.d.ts +32 -0
- package/lib/model/document.js +36 -0
- package/lib/model/document.js.map +1 -0
- package/lib/model/embed-text-details.d.ts +3 -2
- package/lib/model/embed-text-details.js +1 -0
- package/lib/model/embed-text-details.js.map +1 -1
- package/lib/model/function-call.d.ts +39 -0
- package/lib/model/function-call.js +57 -0
- package/lib/model/function-call.js.map +1 -0
- package/lib/model/function-definition.d.ts +43 -0
- package/lib/model/function-definition.js +59 -0
- package/lib/model/function-definition.js.map +1 -0
- package/lib/model/generic-chat-request.d.ts +10 -0
- package/lib/model/generic-chat-request.js +14 -0
- package/lib/model/generic-chat-request.js.map +1 -1
- package/lib/model/image-url.d.ts +13 -4
- package/lib/model/image-url.js.map +1 -1
- package/lib/model/index.d.ts +28 -0
- package/lib/model/index.js +30 -1
- package/lib/model/index.js.map +1 -1
- package/lib/model/message.js +4 -0
- package/lib/model/message.js.map +1 -1
- package/lib/model/rerank-text-details.d.ts +54 -0
- package/lib/model/rerank-text-details.js +62 -0
- package/lib/model/rerank-text-details.js.map +1 -0
- package/lib/model/rerank-text-result.d.ts +45 -0
- package/lib/model/rerank-text-result.js +68 -0
- package/lib/model/rerank-text-result.js.map +1 -0
- package/lib/model/tool-call.d.ts +33 -0
- package/lib/model/tool-call.js +75 -0
- package/lib/model/tool-call.js.map +1 -0
- package/lib/model/tool-choice-auto.d.ts +31 -0
- package/lib/model/tool-choice-auto.js +57 -0
- package/lib/model/tool-choice-auto.js.map +1 -0
- package/lib/model/tool-choice-function.d.ts +35 -0
- package/lib/model/tool-choice-function.js +59 -0
- package/lib/model/tool-choice-function.js.map +1 -0
- package/lib/model/tool-choice-none.d.ts +31 -0
- package/lib/model/tool-choice-none.js +57 -0
- package/lib/model/tool-choice-none.js.map +1 -0
- package/lib/model/tool-choice-required.d.ts +31 -0
- package/lib/model/tool-choice-required.js +59 -0
- package/lib/model/tool-choice-required.js.map +1 -0
- package/lib/model/tool-choice.d.ts +29 -0
- package/lib/model/tool-choice.js +87 -0
- package/lib/model/tool-choice.js.map +1 -0
- package/lib/model/tool-definition.d.ts +29 -0
- package/lib/model/tool-definition.js +75 -0
- package/lib/model/tool-definition.js.map +1 -0
- package/lib/model/tool-message.d.ts +35 -0
- package/lib/model/tool-message.js +57 -0
- package/lib/model/tool-message.js.map +1 -0
- package/lib/request/index.d.ts +2 -0
- package/lib/request/rerank-text-request.d.ts +35 -0
- package/lib/request/rerank-text-request.js +15 -0
- package/lib/request/rerank-text-request.js.map +1 -0
- package/lib/response/index.d.ts +2 -0
- package/lib/response/rerank-text-response.d.ts +30 -0
- package/lib/response/rerank-text-response.js +15 -0
- package/lib/response/rerank-text-response.js.map +1 -0
- package/package.json +3 -3

package/lib/model/function-definition.js
ADDED
@@ -0,0 +1,59 @@
+"use strict";
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FunctionDefinition = void 0;
+const model = __importStar(require("../model"));
+var FunctionDefinition;
+(function (FunctionDefinition) {
+    function getJsonObj(obj, isParentJsonObj) {
+        const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolDefinition.getJsonObj(obj))), {});
+        return jsonObj;
+    }
+    FunctionDefinition.getJsonObj = getJsonObj;
+    FunctionDefinition.type = "FUNCTION";
+    function getDeserializedJsonObj(obj, isParentJsonObj) {
+        const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+            ? obj
+            : model.ToolDefinition.getDeserializedJsonObj(obj))), {});
+        return jsonObj;
+    }
+    FunctionDefinition.getDeserializedJsonObj = getDeserializedJsonObj;
+})(FunctionDefinition = exports.FunctionDefinition || (exports.FunctionDefinition = {}));
+//# sourceMappingURL=function-definition.js.map

package/lib/model/function-definition.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"function-definition.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/function-definition.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAuBlC,IAAiB,kBAAkB,CAuBlC;AAvBD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,cAAc,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GACtF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,6BAAU,aAOzB,CAAA;IACY,uBAAI,GAAG,UAAU,CAAC;IAC/B,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,cAAc,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GAC1E,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,yCAAsB,yBAYrC,CAAA;AACH,CAAC,EAvBgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAuBlC"}

package/lib/model/generic-chat-request.d.ts
CHANGED
@@ -34,6 +34,11 @@ export interface GenericChatRequest extends model.BaseChatRequest {
    * The number of of generated texts that will be returned. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
    */
   "numGenerations"?: number;
+  /**
+   * If specified, the backend will make a best effort to sample tokens deterministically, so that repeated requests with the same seed and parameters yield the same result. However, determinism cannot be fully guaranteed.
+   * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "seed"?: number;
   /**
    * Whether to include the user prompt in the response. Applies only to non-stream results.
    */
@@ -94,6 +99,11 @@ export interface GenericChatRequest extends model.BaseChatRequest {
    *
    */
   "logitBias"?: any;
+  "toolChoice"?: model.ToolChoiceFunction | model.ToolChoiceNone | model.ToolChoiceAuto | model.ToolChoiceRequired;
+  /**
+   * A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
+   */
+  "tools"?: Array<model.ToolDefinition>;
   "apiFormat": string;
 }
 export declare namespace GenericChatRequest {
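
The two hunks above carry the new generic (non-Cohere) tool-calling surface in 2.102.0: a best-effort deterministic `seed`, a `toolChoice` union, and a `tools` array of `ToolDefinition`s. A minimal sketch of a request using the new fields follows; the `FunctionDefinition` fields (`name`, `description`, `parameters`), the `TEXT` content shape, and the `{ type: "AUTO" }` form of `ToolChoiceAuto` are assumptions drawn from the wider SDK, not from this excerpt.

import * as generativeaiinference from "oci-generativeaiinference";

// Sketch only: a GENERIC-format chat request that advertises one hypothetical
// function tool and pins sampling with the new seed field. Field names other
// than seed, tools, toolChoice and apiFormat are assumed, not shown in this diff.
const genericRequest = {
  apiFormat: "GENERIC",
  messages: [
    {
      role: "USER",
      content: [{ type: "TEXT", text: "What is the weather in Austin?" }]
    }
  ],
  seed: 42, // repeated requests with the same seed and parameters aim for the same result
  tools: [
    {
      type: "FUNCTION",
      name: "getWeather", // hypothetical function
      description: "Look up the current weather for a city",
      parameters: { type: "object", properties: { city: { type: "string" } } }
    }
  ],
  toolChoice: { type: "AUTO" }
} as generativeaiinference.models.GenericChatRequest;

On the wire, the compiled serializer in the next section routes toolChoice through model.ToolChoice.getJsonObj and each entry in tools through model.ToolDefinition.getJsonObj.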

package/lib/model/generic-chat-request.js
CHANGED
@@ -48,6 +48,12 @@ var GenericChatRequest;
                 ? obj.messages.map(item => {
                     return model.Message.getJsonObj(item);
                 })
+                : undefined,
+            "toolChoice": obj.toolChoice ? model.ToolChoice.getJsonObj(obj.toolChoice) : undefined,
+            "tools": obj.tools
+                ? obj.tools.map(item => {
+                    return model.ToolDefinition.getJsonObj(item);
+                })
                 : undefined
         });
         return jsonObj;
@@ -62,6 +68,14 @@ var GenericChatRequest;
                 ? obj.messages.map(item => {
                     return model.Message.getDeserializedJsonObj(item);
                 })
+                : undefined,
+            "toolChoice": obj.toolChoice
+                ? model.ToolChoice.getDeserializedJsonObj(obj.toolChoice)
+                : undefined,
+            "tools": obj.tools
+                ? obj.tools.map(item => {
+                    return model.ToolDefinition.getDeserializedJsonObj(item);
+                })
                 : undefined
         });
         return jsonObj;

package/lib/model/generic-chat-request.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"generic-chat-request.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/generic-chat-request.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;
+{"version":3,"file":"generic-chat-request.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/generic-chat-request.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAiGlC,IAAiB,kBAAkB,CAmDlC;AAnDD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GACvF;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACxC,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;YAEb,YAAY,EAAE,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,SAAS;YACtF,OAAO,EAAE,GAAG,CAAC,KAAK;gBAChB,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACnB,OAAO,KAAK,CAAC,cAAc,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBAC/C,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IApBe,6BAAU,aAoBzB,CAAA;IACY,4BAAS,GAAG,SAAS,CAAC;IACnC,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GAC3E;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACpD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;YAEb,YAAY,EAAE,GAAG,CAAC,UAAU;gBAC1B,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,sBAAsB,CAAC,GAAG,CAAC,UAAU,CAAC;gBACzD,CAAC,CAAC,SAAS;YACb,OAAO,EAAE,GAAG,CAAC,KAAK;gBAChB,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACnB,OAAO,KAAK,CAAC,cAAc,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBAC3D,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IA3Be,yCAAsB,yBA2BrC,CAAA;AACH,CAAC,EAnDgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAmDlC"}

package/lib/model/image-url.d.ts
CHANGED
@@ -18,15 +18,24 @@ To learn more about the service, see the [Generative AI documentation](/iaas/Con
  * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
  */
 /**
- *
+ * Provide a base64 encoded image.
  */
 export interface ImageUrl {
   /**
-
-
+   * The base64 encoded image data.
+   * <p>
+   Example for a png image:
+   * {@code {
+   *   \"type\": \"IMAGE\",
+   *   \"imageUrl\": {
+   *     \"url\": \"data:image/png;base64,<base64 encoded image content>\"
+   *   }
+   * }}
+   *
+   */
   "url": string;
   /**
-   * The
+   * The default value is AUTO and only AUTO is supported. This option controls how to convert the base64 encoded image to tokens.
    */
   "detail"?: ImageUrl.Detail;
 }
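
The rewritten doc comment pins down what ImageUrl.url expects: a data URL wrapping the base64-encoded bytes. Mirroring that embedded example, an image part of a user message might be built like this; the ImageContent wrapper with type "IMAGE" follows the example in the comment, the placeholder base64 string is ours, and detail is omitted since only AUTO is supported.

import * as generativeaiinference from "oci-generativeaiinference";

// Sketch: an IMAGE content part carrying a base64-encoded PNG as a data URL.
const pngBase64 = "iVBORw0KGgoAAAANSUhEUg..."; // hypothetical, truncated image bytes
const imagePart = {
  type: "IMAGE",
  imageUrl: { url: `data:image/png;base64,${pngBase64}` } // detail defaults to AUTO
} as generativeaiinference.models.ImageContent;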

package/lib/model/image-url.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"image-url.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/image-url.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;
+{"version":3,"file":"image-url.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/image-url.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;AA4BH,IAAiB,QAAQ,CAsBxB;AAtBD,WAAiB,QAAQ;IACvB,IAAY,MASX;IATD,WAAY,MAAM;QAChB,uBAAa,CAAA;QACb,uBAAa,CAAA;QACb,qBAAW,CAAA;QACX;;;WAGG;QACH,wCAA8B,CAAA;IAChC,CAAC,EATW,MAAM,GAAN,eAAM,KAAN,eAAM,QASjB;IAED,SAAgB,UAAU,CAAC,GAAa;QACtC,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,mBAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAa;QAClD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,+BAAsB,yBAIrC,CAAA;AACH,CAAC,EAtBgB,QAAQ,GAAR,gBAAQ,KAAR,gBAAQ,QAsBxB"}

package/lib/model/index.d.ts
CHANGED
@@ -45,6 +45,10 @@ import * as CohereToolCall from "./cohere-tool-call";
 export import CohereToolCall = CohereToolCall.CohereToolCall;
 import * as CohereToolResult from "./cohere-tool-result";
 export import CohereToolResult = CohereToolResult.CohereToolResult;
+import * as Document from "./document";
+export import Document = Document.Document;
+import * as DocumentRank from "./document-rank";
+export import DocumentRank = DocumentRank.DocumentRank;
 import * as EmbedTextDetails from "./embed-text-details";
 export import EmbedTextDetails = EmbedTextDetails.EmbedTextDetails;
 import * as EmbedTextResult from "./embed-text-result";
@@ -65,6 +69,10 @@ import * as Logprobs from "./logprobs";
 export import Logprobs = Logprobs.Logprobs;
 import * as Message from "./message";
 export import Message = Message.Message;
+import * as RerankTextDetails from "./rerank-text-details";
+export import RerankTextDetails = RerankTextDetails.RerankTextDetails;
+import * as RerankTextResult from "./rerank-text-result";
+export import RerankTextResult = RerankTextResult.RerankTextResult;
 import * as SearchQuery from "./search-query";
 export import SearchQuery = SearchQuery.SearchQuery;
 import * as ServingMode from "./serving-mode";
@@ -75,6 +83,12 @@ import * as SummarizeTextResult from "./summarize-text-result";
 export import SummarizeTextResult = SummarizeTextResult.SummarizeTextResult;
 import * as TokenLikelihood from "./token-likelihood";
 export import TokenLikelihood = TokenLikelihood.TokenLikelihood;
+import * as ToolCall from "./tool-call";
+export import ToolCall = ToolCall.ToolCall;
+import * as ToolChoice from "./tool-choice";
+export import ToolChoice = ToolChoice.ToolChoice;
+import * as ToolDefinition from "./tool-definition";
+export import ToolDefinition = ToolDefinition.ToolDefinition;
 import * as AssistantMessage from "./assistant-message";
 export import AssistantMessage = AssistantMessage.AssistantMessage;
 import * as CohereChatBotMessage from "./cohere-chat-bot-message";
@@ -99,6 +113,10 @@ import * as CohereUserMessage from "./cohere-user-message";
 export import CohereUserMessage = CohereUserMessage.CohereUserMessage;
 import * as DedicatedServingMode from "./dedicated-serving-mode";
 export import DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
+import * as FunctionCall from "./function-call";
+export import FunctionCall = FunctionCall.FunctionCall;
+import * as FunctionDefinition from "./function-definition";
+export import FunctionDefinition = FunctionDefinition.FunctionDefinition;
 import * as GenericChatRequest from "./generic-chat-request";
 export import GenericChatRequest = GenericChatRequest.GenericChatRequest;
 import * as GenericChatResponse from "./generic-chat-response";
@@ -115,5 +133,15 @@ import * as SystemMessage from "./system-message";
 export import SystemMessage = SystemMessage.SystemMessage;
 import * as TextContent from "./text-content";
 export import TextContent = TextContent.TextContent;
+import * as ToolChoiceAuto from "./tool-choice-auto";
+export import ToolChoiceAuto = ToolChoiceAuto.ToolChoiceAuto;
+import * as ToolChoiceFunction from "./tool-choice-function";
+export import ToolChoiceFunction = ToolChoiceFunction.ToolChoiceFunction;
+import * as ToolChoiceNone from "./tool-choice-none";
+export import ToolChoiceNone = ToolChoiceNone.ToolChoiceNone;
+import * as ToolChoiceRequired from "./tool-choice-required";
+export import ToolChoiceRequired = ToolChoiceRequired.ToolChoiceRequired;
+import * as ToolMessage from "./tool-message";
+export import ToolMessage = ToolMessage.ToolMessage;
 import * as UserMessage from "./user-message";
 export import UserMessage = UserMessage.UserMessage;

package/lib/model/index.js
CHANGED
@@ -38,7 +38,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
+exports.FunctionDefinition = exports.FunctionCall = exports.DedicatedServingMode = exports.CohereUserMessage = exports.CohereToolMessage = exports.CohereSystemMessage = exports.CohereResponseTextFormat = exports.CohereResponseJsonFormat = exports.CohereLlmInferenceResponse = exports.CohereLlmInferenceRequest = exports.CohereChatResponse = exports.CohereChatRequest = exports.CohereChatBotMessage = exports.AssistantMessage = exports.ToolDefinition = exports.ToolChoice = exports.ToolCall = exports.TokenLikelihood = exports.SummarizeTextResult = exports.SummarizeTextDetails = exports.ServingMode = exports.SearchQuery = exports.RerankTextResult = exports.RerankTextDetails = exports.Message = exports.Logprobs = exports.LlmInferenceResponse = exports.LlmInferenceRequest = exports.ImageUrl = exports.GeneratedText = exports.GenerateTextResult = exports.GenerateTextDetails = exports.EmbedTextResult = exports.EmbedTextDetails = exports.DocumentRank = exports.Document = exports.CohereToolResult = exports.CohereToolCall = exports.CohereTool = exports.CohereResponseFormat = exports.CohereParameterDefinition = exports.CohereMessage = exports.Citation = exports.Choice = exports.ChatResult = exports.ChatDetails = exports.ChatContent = exports.ChatChoice = exports.BaseChatResponse = exports.BaseChatRequest = void 0;
+exports.UserMessage = exports.ToolMessage = exports.ToolChoiceRequired = exports.ToolChoiceNone = exports.ToolChoiceFunction = exports.ToolChoiceAuto = exports.TextContent = exports.SystemMessage = exports.OnDemandServingMode = exports.LlamaLlmInferenceResponse = exports.LlamaLlmInferenceRequest = exports.ImageContent = exports.GenericChatResponse = exports.GenericChatRequest = void 0;
 const BaseChatRequest = __importStar(require("./base-chat-request"));
 exports.BaseChatRequest = BaseChatRequest.BaseChatRequest;
 const BaseChatResponse = __importStar(require("./base-chat-response"));
@@ -67,6 +68,10 @@ const CohereToolCall = __importStar(require("./cohere-tool-call"));
 exports.CohereToolCall = CohereToolCall.CohereToolCall;
 const CohereToolResult = __importStar(require("./cohere-tool-result"));
 exports.CohereToolResult = CohereToolResult.CohereToolResult;
+const Document = __importStar(require("./document"));
+exports.Document = Document.Document;
+const DocumentRank = __importStar(require("./document-rank"));
+exports.DocumentRank = DocumentRank.DocumentRank;
 const EmbedTextDetails = __importStar(require("./embed-text-details"));
 exports.EmbedTextDetails = EmbedTextDetails.EmbedTextDetails;
 const EmbedTextResult = __importStar(require("./embed-text-result"));
@@ -87,6 +92,10 @@ const Logprobs = __importStar(require("./logprobs"));
 exports.Logprobs = Logprobs.Logprobs;
 const Message = __importStar(require("./message"));
 exports.Message = Message.Message;
+const RerankTextDetails = __importStar(require("./rerank-text-details"));
+exports.RerankTextDetails = RerankTextDetails.RerankTextDetails;
+const RerankTextResult = __importStar(require("./rerank-text-result"));
+exports.RerankTextResult = RerankTextResult.RerankTextResult;
 const SearchQuery = __importStar(require("./search-query"));
 exports.SearchQuery = SearchQuery.SearchQuery;
 const ServingMode = __importStar(require("./serving-mode"));
@@ -97,6 +106,12 @@ const SummarizeTextResult = __importStar(require("./summarize-text-result"));
 exports.SummarizeTextResult = SummarizeTextResult.SummarizeTextResult;
 const TokenLikelihood = __importStar(require("./token-likelihood"));
 exports.TokenLikelihood = TokenLikelihood.TokenLikelihood;
+const ToolCall = __importStar(require("./tool-call"));
+exports.ToolCall = ToolCall.ToolCall;
+const ToolChoice = __importStar(require("./tool-choice"));
+exports.ToolChoice = ToolChoice.ToolChoice;
+const ToolDefinition = __importStar(require("./tool-definition"));
+exports.ToolDefinition = ToolDefinition.ToolDefinition;
 const AssistantMessage = __importStar(require("./assistant-message"));
 exports.AssistantMessage = AssistantMessage.AssistantMessage;
 const CohereChatBotMessage = __importStar(require("./cohere-chat-bot-message"));
@@ -121,6 +136,10 @@ const CohereUserMessage = __importStar(require("./cohere-user-message"));
 exports.CohereUserMessage = CohereUserMessage.CohereUserMessage;
 const DedicatedServingMode = __importStar(require("./dedicated-serving-mode"));
 exports.DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
+const FunctionCall = __importStar(require("./function-call"));
+exports.FunctionCall = FunctionCall.FunctionCall;
+const FunctionDefinition = __importStar(require("./function-definition"));
+exports.FunctionDefinition = FunctionDefinition.FunctionDefinition;
 const GenericChatRequest = __importStar(require("./generic-chat-request"));
 exports.GenericChatRequest = GenericChatRequest.GenericChatRequest;
 const GenericChatResponse = __importStar(require("./generic-chat-response"));
@@ -137,6 +156,16 @@ const SystemMessage = __importStar(require("./system-message"));
 exports.SystemMessage = SystemMessage.SystemMessage;
 const TextContent = __importStar(require("./text-content"));
 exports.TextContent = TextContent.TextContent;
+const ToolChoiceAuto = __importStar(require("./tool-choice-auto"));
+exports.ToolChoiceAuto = ToolChoiceAuto.ToolChoiceAuto;
+const ToolChoiceFunction = __importStar(require("./tool-choice-function"));
+exports.ToolChoiceFunction = ToolChoiceFunction.ToolChoiceFunction;
+const ToolChoiceNone = __importStar(require("./tool-choice-none"));
+exports.ToolChoiceNone = ToolChoiceNone.ToolChoiceNone;
+const ToolChoiceRequired = __importStar(require("./tool-choice-required"));
+exports.ToolChoiceRequired = ToolChoiceRequired.ToolChoiceRequired;
+const ToolMessage = __importStar(require("./tool-message"));
+exports.ToolMessage = ToolMessage.ToolMessage;
 const UserMessage = __importStar(require("./user-message"));
 exports.UserMessage = UserMessage.UserMessage;
 //# sourceMappingURL=index.js.map

package/lib/model/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/index.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/index.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;;AAEH,qEAAuD;AACzC,QAAA,eAAe,GAAG,eAAe,CAAC,eAAe,CAAC;AAChE,uEAAyD;AAC3C,QAAA,gBAAgB,GAAG,gBAAgB,CAAC,gBAAgB,CAAC;AACnE,0DAA4C;AAC9B,QAAA,UAAU,GAAG,UAAU,CAAC,UAAU,CAAC;AACjD,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,0DAA4C;AAC9B,QAAA,UAAU,GAAG,UAAU,CAAC,UAAU,CAAC;AACjD,iDAAmC;AACrB,QAAA,MAAM,GAAG,MAAM,CAAC,MAAM,CAAC;AACrC,qDAAuC;AACzB,QAAA,QAAQ,GAAG,QAAQ,CAAC,QAAQ,CAAC;AAC3C,gEAAkD;AACpC,QAAA,aAAa,GAAG,aAAa,CAAC,aAAa,CAAC;AAC1D,yFAA2E;AAC7D,QAAA,yBAAyB,GAAG,yBAAyB,CAAC,yBAAyB,CAAC;AAC9F,+EAAiE;AACnD,QAAA,oBAAoB,GAAG,oBAAoB,CAAC,oBAAoB,CAAC;AAC/E,0DAA4C;AAC9B,QAAA,UAAU,GAAG,UAAU,CAAC,UAAU,CAAC;AACjD,mEAAqD;AACvC,QAAA,cAAc,GAAG,cAAc,CAAC,cAAc,CAAC;AAC7D,uEAAyD;AAC3C,QAAA,gBAAgB,GAAG,gBAAgB,CAAC,gBAAgB,CAAC;AACnE,qDAAuC;AACzB,QAAA,QAAQ,GAAG,QAAQ,CAAC,QAAQ,CAAC;AAC3C,8DAAgD;AAClC,QAAA,YAAY,GAAG,YAAY,CAAC,YAAY,CAAC;AACvD,uEAAyD;AAC3C,QAAA,gBAAgB,GAAG,gBAAgB,CAAC,gBAAgB,CAAC;AACnE,qEAAuD;AACzC,QAAA,eAAe,GAAG,eAAe,CAAC,eAAe,CAAC;AAChE,6EAA+D;AACjD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,2EAA6D;AAC/C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,gEAAkD;AACpC,QAAA,aAAa,GAAG,aAAa,CAAC,aAAa,CAAC;AAC1D,sDAAwC;AAC1B,QAAA,QAAQ,GAAG,QAAQ,CAAC,QAAQ,CAAC;AAC3C,6EAA+D;AACjD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,+EAAiE;AACnD,QAAA,oBAAoB,GAAG,oBAAoB,CAAC,oBAAoB,CAAC;AAC/E,qDAAuC;AACzB,QAAA,QAAQ,GAAG,QAAQ,CAAC,QAAQ,CAAC;AAC3C,mDAAqC;AACvB,QAAA,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC;AACxC,yEAA2D;AAC7C,QAAA,iBAAiB,GAAG,iBAAiB,CAAC,iBAAiB,CAAC;AACtE,uEAAyD;AAC3C,QAAA,gBAAgB,GAAG,gBAAgB,CAAC,gBAAgB,CAAC;AACnE,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,+EAAiE;AACnD,QAAA,oBAAoB,GAAG,oBAAoB,CAAC,oBAAoB,CAAC;AAC/E,6EAA+D;AACjD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,oEAAsD;AACxC,QAAA,eAAe,GAAG,eAAe,CAAC,eAAe,CAAC;AAChE,sDAAwC;AAC1B,QAAA,QAAQ,GAAG,QAAQ,CAAC,QAAQ,CAAC;AAC3C,0DAA4C;AAC9B,QAAA,UAAU,GAAG,UAAU,CAAC,UAAU,CAAC;AACjD,kEAAoD;AACtC,QAAA,cAAc,GAAG,cAAc,CAAC,cAAc,CAAC;AAE7D,sEAAwD;AAC1C,QAAA,gBAAgB,GAAG,gBAAgB,CAAC,gBAAgB,CAAC;AACnE,gFAAkE;AACpD,QAAA,oBAAoB,GAAG,oBAAoB,CAAC,oBAAoB,CAAC;AAC/E,yEAA2D;AAC7C,QAAA,iBAAiB,GAAG,iBAAiB,CAAC,iBAAiB,CAAC;AACtE,2EAA6D;AAC/C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,0FAA4E;AAC9D,QAAA,yBAAyB,GAAG,yBAAyB,CAAC,yBAAyB,CAAC;AAC9F,4FAA8E;AAChE,QAAA,0BAA0B,GAAG,0BAA0B,CAAC,0BAA0B,CAAC;AACjG,wFAA0E;AAC5D,QAAA,wBAAwB,GAAG,wBAAwB,CAAC,wBAAwB,CAAC;AAC3F,wFAA0E;AAC5D,QAAA,wBAAwB,GAAG,wBAAwB,CAAC,wBAAwB,CAAC;AAC3F,6EAA+D;AACjD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,yEAA2D;AAC7C,QAAA,iBAAiB,GAAG,iBAAiB,CAAC,iBAAiB,CAAC;AACtE,yEAA2D;AAC7C,QAAA,iBAAiB,GAAG,iBAAiB,CAAC,iBAAiB,CAAC;AACtE,+EAAiE;AACnD,QAAA,oBAAoB,GAAG,oBAAoB,CAAC,oBAAoB,CAAC;AAC/E,8DAAgD;AAClC,QAAA,YAAY,GAAG,YAAY,CAAC,YAAY,CAAC;AACvD,0EAA4D;AAC9C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,2EAA6D;AAC/C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,6EAA+D;AACjD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,8DAAgD;AAClC,QAAA,YAAY,GAAG,YAAY,CAAC,YAAY,CAAC;AACvD,wFAA0E;AAC5D,QAAA,wBAAwB,GAAG,wBAAwB,CAAC,wBAAwB,CAAC;AAC3F,0FAA4E;AAC9D,QAAA,yBAAyB,GAAG,yBAAyB,CAAC,yBAAyB,CAAC;AAC9F,8EAAgE;AAClD,QAAA,mBAAmB,GAAG,mBAAmB,CAAC,mBAAmB,CAAC;AAC5E,gEAAkD;AACpC,QAAA,aAAa,GAAG,aAAa,CAAC,aAAa,CAAC;AAC1D,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,mEAAqD;AACvC,QAAA,cAAc,GAAG,cAAc,CAAC,cAAc,CAAC;AAC7D,2EAA6D;AAC/C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,mEAAqD;AACvC,QAAA,cAAc,GAAG,cAAc,CAAC,cAAc,CAAC;AAC7D,2EAA6D;AAC/C,QAAA,kBAAkB,GAAG,kBAAkB,CAAC,kBAAkB,CAAC;AACzE,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;AACpD,4DAA8C;AAChC,QAAA,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC"}

package/lib/model/message.js
CHANGED
@@ -59,6 +59,8 @@ var Message;
                     return model.AssistantMessage.getJsonObj(jsonObj, true);
                 case "USER":
                     return model.UserMessage.getJsonObj(jsonObj, true);
+                case "TOOL":
+                    return model.ToolMessage.getJsonObj(jsonObj, true);
                 default:
                     if (common.LOG.logger)
                         common.LOG.logger.info(`Unknown value for: ${obj.role}`);
@@ -83,6 +85,8 @@ var Message;
                     return model.AssistantMessage.getDeserializedJsonObj(jsonObj, true);
                 case "USER":
                     return model.UserMessage.getDeserializedJsonObj(jsonObj, true);
+                case "TOOL":
+                    return model.ToolMessage.getDeserializedJsonObj(jsonObj, true);
                 default:
                     if (common.LOG.logger)
                         common.LOG.logger.info(`Unknown value for: ${obj.role}`);
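
Both serializer switches above gain a TOOL branch, so a tool's output can be fed back to the model under its own role alongside SYSTEM, ASSISTANT and USER. A rough sketch of such a follow-up turn is below; tool-message.d.ts is not part of this excerpt, so every ToolMessage field other than the "TOOL" role is an assumption.

import * as generativeaiinference from "oci-generativeaiinference";

// Hypothetical follow-up turn after the model requested a tool call.
// Only the "TOOL" role is confirmed by this diff; toolCallId and the content shape are assumed.
const toolTurn = {
  role: "TOOL",
  toolCallId: "call-1", // assumed: echoes the id from the assistant's ToolCall
  content: [{ type: "TEXT", text: JSON.stringify({ tempC: 31 }) }]
} as generativeaiinference.models.ToolMessage;

// Message.getJsonObj now recognizes the TOOL role and delegates to ToolMessage.getJsonObj.
const wireTurn = generativeaiinference.models.Message.getJsonObj(toolTurn);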

package/lib/model/message.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AActC,IAAiB,OAAO,
+{"version":3,"file":"message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AActC,IAAiB,OAAO,CAqEvB;AArED,WAAiB,OAAO;IACtB,SAAgB,UAAU,CAAC,GAAY;QACrC,MAAM,OAAO,mCACR,GAAG,GACH;YACD,SAAS,EAAE,GAAG,CAAC,OAAO;gBACpB,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,KAAK,CAAC,WAAW,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBAC5C,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,QAAQ;oBACX,OAAO,KAAK,CAAC,aAAa,CAAC,UAAU,CAA+B,OAAQ,EAAE,IAAI,CAAC,CAAC;gBACtF,KAAK,WAAW;oBACd,OAAO,KAAK,CAAC,gBAAgB,CAAC,UAAU,CAAkC,OAAQ,EAAE,IAAI,CAAC,CAAC;gBAC5F,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,WAAW,CAAC,UAAU,CAA6B,OAAQ,EAAE,IAAI,CAAC,CAAC;gBAClF,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,WAAW,CAAC,UAAU,CAA6B,OAAQ,EAAE,IAAI,CAAC,CAAC;gBAClF;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IA3Be,kBAAU,aA2BzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAY;QACjD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,SAAS,EAAE,GAAG,CAAC,OAAO;gBACpB,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,KAAK,CAAC,WAAW,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACxD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,QAAQ;oBACX,OAAO,KAAK,CAAC,aAAa,CAAC,sBAAsB,CACjB,OAAQ,EACtC,IAAI,CACL,CAAC;gBACJ,KAAK,WAAW;oBACd,OAAO,KAAK,CAAC,gBAAgB,CAAC,sBAAsB,CACjB,OAAQ,EACzC,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,WAAW,CAAC,sBAAsB,CACjB,OAAQ,EACpC,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,WAAW,CAAC,sBAAsB,CACjB,OAAQ,EACpC,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IAvCe,8BAAsB,yBAuCrC,CAAA;AACH,CAAC,EArEgB,OAAO,GAAP,eAAO,KAAP,eAAO,QAqEvB"}

package/lib/model/rerank-text-details.d.ts
ADDED
@@ -0,0 +1,54 @@
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * Details required for a rerank request.
+ */
+export interface RerankTextDetails {
+  /**
+   * Input query for search in the documents.
+   */
+  "input": string;
+  /**
+   * The OCID of the compartment to call into the Generative AI service LLMs.
+   */
+  "compartmentId": string;
+  "servingMode": model.DedicatedServingMode | model.OnDemandServingMode;
+  /**
+   * A list of document strings to rerank based on the query asked.
+   */
+  "documents": Array<string>;
+  /**
+   * The number of most relevant documents or indices to return. Defaults to the length of the documents. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "topN"?: number;
+  /**
+   * Whether or not to return the documents in the response.
+   */
+  "isEcho"?: boolean;
+  /**
+   * The maximum number of chunks to produce internally from a document. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "maxChunksPerDocument"?: number;
+}
+export declare namespace RerankTextDetails {
+  function getJsonObj(obj: RerankTextDetails): object;
+  function getDeserializedJsonObj(obj: RerankTextDetails): object;
+}
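
RerankTextDetails is the request body for the new rerank operation; the RerankTextRequest/RerankTextResponse wrappers and the new client method are added under lib/request, lib/response and lib/client in this release but are not reproduced in this excerpt. A minimal sketch, assuming the client method is named rerankText, the wrapper field is rerankTextDetails, and the on-demand serving mode names a rerank-capable model:

import * as generativeaiinference from "oci-generativeaiinference";

// Sketch only: the method and wrapper names below are inferred from the new
// rerank-text-request/response files listed above, not shown in this excerpt.
async function rerankExample(client: generativeaiinference.GenerativeAiInferenceClient) {
  const rerankTextDetails: generativeaiinference.models.RerankTextDetails = {
    input: "What is the capital of France?",
    compartmentId: "ocid1.compartment.oc1..exampleuniqueid", // placeholder OCID
    servingMode: {
      servingType: "ON_DEMAND",
      modelId: "example-rerank-model" // placeholder; use a rerank-capable model
    },
    documents: [
      "Paris is the capital and largest city of France.",
      "Berlin is the capital of Germany."
    ],
    topN: 1,
    isEcho: true // echo the matched documents back in the response
  };
  return client.rerankText({ rerankTextDetails });
}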

package/lib/model/rerank-text-details.js
ADDED
@@ -0,0 +1,62 @@
+"use strict";
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.RerankTextDetails = void 0;
+const model = __importStar(require("../model"));
+var RerankTextDetails;
+(function (RerankTextDetails) {
+    function getJsonObj(obj) {
+        const jsonObj = Object.assign(Object.assign({}, obj), {
+            "servingMode": obj.servingMode ? model.ServingMode.getJsonObj(obj.servingMode) : undefined
+        });
+        return jsonObj;
+    }
+    RerankTextDetails.getJsonObj = getJsonObj;
+    function getDeserializedJsonObj(obj) {
+        const jsonObj = Object.assign(Object.assign({}, obj), {
+            "servingMode": obj.servingMode
+                ? model.ServingMode.getDeserializedJsonObj(obj.servingMode)
+                : undefined
+        });
+        return jsonObj;
+    }
+    RerankTextDetails.getDeserializedJsonObj = getDeserializedJsonObj;
+})(RerankTextDetails = exports.RerankTextDetails || (exports.RerankTextDetails = {}));
+//# sourceMappingURL=rerank-text-details.js.map

package/lib/model/rerank-text-details.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"rerank-text-details.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/rerank-text-details.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAkClC,IAAiB,iBAAiB,CAuBjC;AAvBD,WAAiB,iBAAiB;IAChC,SAAgB,UAAU,CAAC,GAAsB;QAC/C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,aAAa,EAAE,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,UAAU,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,SAAS;SAC3F,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IATe,4BAAU,aASzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAsB;QAC3D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,aAAa,EAAE,GAAG,CAAC,WAAW;gBAC5B,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,sBAAsB,CAAC,GAAG,CAAC,WAAW,CAAC;gBAC3D,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAXe,wCAAsB,yBAWrC,CAAA;AACH,CAAC,EAvBgB,iBAAiB,GAAjB,yBAAiB,KAAjB,yBAAiB,QAuBjC"}

package/lib/model/rerank-text-result.d.ts
ADDED
@@ -0,0 +1,45 @@
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * The rerank response to return to the caller.
+ */
+export interface RerankTextResult {
+  /**
+   * A unique identifier for this {@code RerankResult}.
+   */
+  "id": string;
+  /**
+   * The OCID of the model used in the rerank request.
+   */
+  "modelId"?: string;
+  /**
+   * The version of the model.
+   */
+  "modelVersion"?: string;
+  /**
+   * Top n documents with their index and relevance score.
+   */
+  "documentRanks": Array<model.DocumentRank>;
+}
+export declare namespace RerankTextResult {
+  function getJsonObj(obj: RerankTextResult): object;
+  function getDeserializedJsonObj(obj: RerankTextResult): object;
+}
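
RerankTextResult surfaces the reranked order through documentRanks. document-rank.d.ts is added in this release but not shown in this excerpt, so the index and relevanceScore fields used in the sketch below are assumptions consistent with the "Top n documents with their index and relevance score" description above.

import * as generativeaiinference from "oci-generativeaiinference";

// Sketch: map the top-ranked entry back to the original documents array.
// DocumentRank's index/relevanceScore field names are assumed, hence the loose cast.
function topRankedDocument(
  result: generativeaiinference.models.RerankTextResult,
  documents: string[]
): string | undefined {
  const ranks = result.documentRanks as Array<any>;
  const best = [...ranks].sort((a, b) => b.relevanceScore - a.relevanceScore)[0];
  return best ? documents[best.index] : undefined;
}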

package/lib/model/rerank-text-result.js
ADDED
@@ -0,0 +1,68 @@
+"use strict";
+/**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.RerankTextResult = void 0;
+const model = __importStar(require("../model"));
+var RerankTextResult;
+(function (RerankTextResult) {
+    function getJsonObj(obj) {
+        const jsonObj = Object.assign(Object.assign({}, obj), {
+            "documentRanks": obj.documentRanks
+                ? obj.documentRanks.map(item => {
+                    return model.DocumentRank.getJsonObj(item);
+                })
+                : undefined
+        });
+        return jsonObj;
+    }
+    RerankTextResult.getJsonObj = getJsonObj;
+    function getDeserializedJsonObj(obj) {
+        const jsonObj = Object.assign(Object.assign({}, obj), {
+            "documentRanks": obj.documentRanks
+                ? obj.documentRanks.map(item => {
+                    return model.DocumentRank.getDeserializedJsonObj(item);
+                })
+                : undefined
+        });
+        return jsonObj;
+    }
+    RerankTextResult.getDeserializedJsonObj = getDeserializedJsonObj;
+})(RerankTextResult = exports.RerankTextResult || (exports.RerankTextResult = {}));
+//# sourceMappingURL=rerank-text-result.js.map

package/lib/model/rerank-text-result.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"rerank-text-result.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/rerank-text-result.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAyBlC,IAAiB,gBAAgB,CA6BhC;AA7BD,WAAiB,gBAAgB;IAC/B,SAAgB,UAAU,CAAC,GAAqB;QAC9C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,eAAe,EAAE,GAAG,CAAC,aAAa;gBAChC,CAAC,CAAC,GAAG,CAAC,aAAa,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBAC3B,OAAO,KAAK,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBAC7C,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,2BAAU,aAazB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAqB;QAC1D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,eAAe,EAAE,GAAG,CAAC,aAAa;gBAChC,CAAC,CAAC,GAAG,CAAC,aAAa,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBAC3B,OAAO,KAAK,CAAC,YAAY,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACzD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,uCAAsB,yBAarC,CAAA;AACH,CAAC,EA7BgB,gBAAgB,GAAhB,wBAAgB,KAAhB,wBAAgB,QA6BhC"}