oci-generativeaiinference 2.101.0 → 2.102.0

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (72)
  1. package/lib/client.d.ts +12 -0
  2. package/lib/client.js +66 -0
  3. package/lib/client.js.map +1 -1
  4. package/lib/model/assistant-message.d.ts +4 -0
  5. package/lib/model/assistant-message.js +14 -2
  6. package/lib/model/assistant-message.js.map +1 -1
  7. package/lib/model/chat-choice.d.ts +1 -1
  8. package/lib/model/cohere-chat-request.d.ts +1 -1
  9. package/lib/model/document-rank.d.ts +38 -0
  10. package/lib/model/document-rank.js +60 -0
  11. package/lib/model/document-rank.js.map +1 -0
  12. package/lib/model/document.d.ts +32 -0
  13. package/lib/model/document.js +36 -0
  14. package/lib/model/document.js.map +1 -0
  15. package/lib/model/embed-text-details.d.ts +3 -2
  16. package/lib/model/embed-text-details.js +1 -0
  17. package/lib/model/embed-text-details.js.map +1 -1
  18. package/lib/model/function-call.d.ts +39 -0
  19. package/lib/model/function-call.js +57 -0
  20. package/lib/model/function-call.js.map +1 -0
  21. package/lib/model/function-definition.d.ts +43 -0
  22. package/lib/model/function-definition.js +59 -0
  23. package/lib/model/function-definition.js.map +1 -0
  24. package/lib/model/generic-chat-request.d.ts +10 -0
  25. package/lib/model/generic-chat-request.js +14 -0
  26. package/lib/model/generic-chat-request.js.map +1 -1
  27. package/lib/model/image-url.d.ts +13 -4
  28. package/lib/model/image-url.js.map +1 -1
  29. package/lib/model/index.d.ts +28 -0
  30. package/lib/model/index.js +30 -1
  31. package/lib/model/index.js.map +1 -1
  32. package/lib/model/message.js +4 -0
  33. package/lib/model/message.js.map +1 -1
  34. package/lib/model/rerank-text-details.d.ts +54 -0
  35. package/lib/model/rerank-text-details.js +62 -0
  36. package/lib/model/rerank-text-details.js.map +1 -0
  37. package/lib/model/rerank-text-result.d.ts +45 -0
  38. package/lib/model/rerank-text-result.js +68 -0
  39. package/lib/model/rerank-text-result.js.map +1 -0
  40. package/lib/model/tool-call.d.ts +33 -0
  41. package/lib/model/tool-call.js +75 -0
  42. package/lib/model/tool-call.js.map +1 -0
  43. package/lib/model/tool-choice-auto.d.ts +31 -0
  44. package/lib/model/tool-choice-auto.js +57 -0
  45. package/lib/model/tool-choice-auto.js.map +1 -0
  46. package/lib/model/tool-choice-function.d.ts +35 -0
  47. package/lib/model/tool-choice-function.js +59 -0
  48. package/lib/model/tool-choice-function.js.map +1 -0
  49. package/lib/model/tool-choice-none.d.ts +31 -0
  50. package/lib/model/tool-choice-none.js +57 -0
  51. package/lib/model/tool-choice-none.js.map +1 -0
  52. package/lib/model/tool-choice-required.d.ts +31 -0
  53. package/lib/model/tool-choice-required.js +59 -0
  54. package/lib/model/tool-choice-required.js.map +1 -0
  55. package/lib/model/tool-choice.d.ts +29 -0
  56. package/lib/model/tool-choice.js +87 -0
  57. package/lib/model/tool-choice.js.map +1 -0
  58. package/lib/model/tool-definition.d.ts +29 -0
  59. package/lib/model/tool-definition.js +75 -0
  60. package/lib/model/tool-definition.js.map +1 -0
  61. package/lib/model/tool-message.d.ts +35 -0
  62. package/lib/model/tool-message.js +57 -0
  63. package/lib/model/tool-message.js.map +1 -0
  64. package/lib/request/index.d.ts +2 -0
  65. package/lib/request/rerank-text-request.d.ts +35 -0
  66. package/lib/request/rerank-text-request.js +15 -0
  67. package/lib/request/rerank-text-request.js.map +1 -0
  68. package/lib/response/index.d.ts +2 -0
  69. package/lib/response/rerank-text-response.d.ts +30 -0
  70. package/lib/response/rerank-text-response.js +15 -0
  71. package/lib/response/rerank-text-response.js.map +1 -0
  72. package/package.json +3 -3
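
The headline additions in 2.102.0 are the tool-calling models (ToolCall, FunctionCall, ToolDefinition, FunctionDefinition, ToolMessage, and the ToolChoice hierarchy) and the new rerank operation (Document, DocumentRank, RerankTextDetails, RerankTextResult, plus the RerankText request/response pair and client support). The sketch below is not taken from the package; it only illustrates the shape of the ToolChoice variants shown in the hunks that follow, assumes the SDK's usual top-level `models` re-export, and uses the made-up function name "getWeather".

```ts
import * as generativeaiinference from "oci-generativeaiinference";

// Minimal sketch: the new tool-choice variants are discriminated by `type`.
// ToolChoiceAuto / ToolChoiceNone / ToolChoiceRequired carry only the
// discriminator; ToolChoiceFunction can additionally name a function.
const auto: generativeaiinference.models.ToolChoiceAuto = { type: "AUTO" };
const forced: generativeaiinference.models.ToolChoiceFunction = {
  type: "FUNCTION",
  name: "getWeather" // illustrative name, not part of the package
};

// ToolChoice.getJsonObj dispatches on `type` (see the tool-choice.js hunk below).
const wire = generativeaiinference.models.ToolChoice.getJsonObj(forced);
console.log(wire);
```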

package/lib/model/tool-call.d.ts
@@ -0,0 +1,33 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ /**
+ * The tool call generated by the model, such as function call.
+ */
+ export interface ToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ "id": string;
+ "type": string;
+ }
+ export declare namespace ToolCall {
+ function getJsonObj(obj: ToolCall): object;
+ function getDeserializedJsonObj(obj: ToolCall): object;
+ }

package/lib/model/tool-call.js
@@ -0,0 +1,75 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolCall = void 0;
+ const model = __importStar(require("../model"));
+ const common = require("oci-common");
+ var ToolCall;
+ (function (ToolCall) {
+ function getJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {});
+ if (obj && "type" in obj && obj.type) {
+ switch (obj.type) {
+ case "FUNCTION":
+ return model.FunctionCall.getJsonObj(jsonObj, true);
+ default:
+ if (common.LOG.logger)
+ common.LOG.logger.info(`Unknown value for: ${obj.type}`);
+ }
+ }
+ return jsonObj;
+ }
+ ToolCall.getJsonObj = getJsonObj;
+ function getDeserializedJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {});
+ if (obj && "type" in obj && obj.type) {
+ switch (obj.type) {
+ case "FUNCTION":
+ return model.FunctionCall.getDeserializedJsonObj(jsonObj, true);
+ default:
+ if (common.LOG.logger)
+ common.LOG.logger.info(`Unknown value for: ${obj.type}`);
+ }
+ }
+ return jsonObj;
+ }
+ ToolCall.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolCall = exports.ToolCall || (exports.ToolCall = {}));
+ //# sourceMappingURL=tool-call.js.map

package/lib/model/tool-call.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"tool-call.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/tool-call.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AActC,IAAiB,QAAQ,CA8BxB;AA9BD,WAAiB,QAAQ;IACvB,SAAgB,UAAU,CAAC,GAAa;QACtC,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,YAAY,CAAC,UAAU,CAA8B,OAAQ,EAAE,IAAI,CAAC,CAAC;gBACpF;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,mBAAU,aAYzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAa;QAClD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,YAAY,CAAC,sBAAsB,CACjB,OAAQ,EACrC,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IAfe,+BAAsB,yBAerC,CAAA;AACH,CAAC,EA9BgB,QAAQ,GAAR,gBAAQ,KAAR,gBAAQ,QA8BxB"}

package/lib/model/tool-choice-auto.d.ts
@@ -0,0 +1,31 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ import * as model from "../model";
+ /**
+ * The model can pick between generating a message or calling one or more tools.
+ */
+ export interface ToolChoiceAuto extends model.ToolChoice {
+ "type": string;
+ }
+ export declare namespace ToolChoiceAuto {
+ function getJsonObj(obj: ToolChoiceAuto, isParentJsonObj?: boolean): object;
+ const type = "AUTO";
+ function getDeserializedJsonObj(obj: ToolChoiceAuto, isParentJsonObj?: boolean): object;
+ }

package/lib/model/tool-choice-auto.js
@@ -0,0 +1,57 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolChoiceAuto = void 0;
+ const model = __importStar(require("../model"));
+ var ToolChoiceAuto;
+ (function (ToolChoiceAuto) {
+ function getJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceAuto.getJsonObj = getJsonObj;
+ ToolChoiceAuto.type = "AUTO";
+ function getDeserializedJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getDeserializedJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceAuto.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolChoiceAuto = exports.ToolChoiceAuto || (exports.ToolChoiceAuto = {}));
+ //# sourceMappingURL=tool-choice-auto.js.map

package/lib/model/tool-choice-auto.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"tool-choice-auto.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/tool-choice-auto.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAUlC,IAAiB,cAAc,CAkB9B;AAlBD,WAAiB,cAAc;IAC7B,SAAgB,UAAU,CAAC,GAAmB,EAAE,eAAyB;QACvE,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAoB,CAAC,GAC9E,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,yBAAU,aAOzB,CAAA;IACY,mBAAI,GAAG,MAAM,CAAC;IAC3B,SAAgB,sBAAsB,CAAC,GAAmB,EAAE,eAAyB;QACnF,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,sBAAsB,CAAC,GAAG,CAAoB,CAAC,GAC1F,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,qCAAsB,yBAOrC,CAAA;AACH,CAAC,EAlBgB,cAAc,GAAd,sBAAc,KAAd,sBAAc,QAkB9B"}

package/lib/model/tool-choice-function.d.ts
@@ -0,0 +1,35 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ import * as model from "../model";
+ /**
+ * The tool choice for a function.
+ */
+ export interface ToolChoiceFunction extends model.ToolChoice {
+ /**
+ * The function name.
+ */
+ "name"?: string;
+ "type": string;
+ }
+ export declare namespace ToolChoiceFunction {
+ function getJsonObj(obj: ToolChoiceFunction, isParentJsonObj?: boolean): object;
+ const type = "FUNCTION";
+ function getDeserializedJsonObj(obj: ToolChoiceFunction, isParentJsonObj?: boolean): object;
+ }

package/lib/model/tool-choice-function.js
@@ -0,0 +1,59 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolChoiceFunction = void 0;
+ const model = __importStar(require("../model"));
+ var ToolChoiceFunction;
+ (function (ToolChoiceFunction) {
+ function getJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceFunction.getJsonObj = getJsonObj;
+ ToolChoiceFunction.type = "FUNCTION";
+ function getDeserializedJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+ ? obj
+ : model.ToolChoice.getDeserializedJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceFunction.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolChoiceFunction = exports.ToolChoiceFunction || (exports.ToolChoiceFunction = {}));
+ //# sourceMappingURL=tool-choice-function.js.map

package/lib/model/tool-choice-function.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"tool-choice-function.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/tool-choice-function.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAelC,IAAiB,kBAAkB,CAuBlC;AAvBD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GAClF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,6BAAU,aAOzB,CAAA;IACY,uBAAI,GAAG,UAAU,CAAC;IAC/B,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GACtE,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,yCAAsB,yBAYrC,CAAA;AACH,CAAC,EAvBgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAuBlC"}

package/lib/model/tool-choice-none.d.ts
@@ -0,0 +1,31 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ import * as model from "../model";
+ /**
+ * The model will not call any tool and instead generates a message.
+ */
+ export interface ToolChoiceNone extends model.ToolChoice {
+ "type": string;
+ }
+ export declare namespace ToolChoiceNone {
+ function getJsonObj(obj: ToolChoiceNone, isParentJsonObj?: boolean): object;
+ const type = "NONE";
+ function getDeserializedJsonObj(obj: ToolChoiceNone, isParentJsonObj?: boolean): object;
+ }

package/lib/model/tool-choice-none.js
@@ -0,0 +1,57 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolChoiceNone = void 0;
+ const model = __importStar(require("../model"));
+ var ToolChoiceNone;
+ (function (ToolChoiceNone) {
+ function getJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceNone.getJsonObj = getJsonObj;
+ ToolChoiceNone.type = "NONE";
+ function getDeserializedJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getDeserializedJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceNone.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolChoiceNone = exports.ToolChoiceNone || (exports.ToolChoiceNone = {}));
+ //# sourceMappingURL=tool-choice-none.js.map

package/lib/model/tool-choice-none.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"tool-choice-none.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/tool-choice-none.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAUlC,IAAiB,cAAc,CAkB9B;AAlBD,WAAiB,cAAc;IAC7B,SAAgB,UAAU,CAAC,GAAmB,EAAE,eAAyB;QACvE,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAoB,CAAC,GAC9E,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,yBAAU,aAOzB,CAAA;IACY,mBAAI,GAAG,MAAM,CAAC;IAC3B,SAAgB,sBAAsB,CAAC,GAAmB,EAAE,eAAyB;QACnF,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,sBAAsB,CAAC,GAAG,CAAoB,CAAC,GAC1F,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,qCAAsB,yBAOrC,CAAA;AACH,CAAC,EAlBgB,cAAc,GAAd,sBAAc,KAAd,sBAAc,QAkB9B"}

package/lib/model/tool-choice-required.d.ts
@@ -0,0 +1,31 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ import * as model from "../model";
+ /**
+ * The model must call one or more tools.
+ */
+ export interface ToolChoiceRequired extends model.ToolChoice {
+ "type": string;
+ }
+ export declare namespace ToolChoiceRequired {
+ function getJsonObj(obj: ToolChoiceRequired, isParentJsonObj?: boolean): object;
+ const type = "REQUIRED";
+ function getDeserializedJsonObj(obj: ToolChoiceRequired, isParentJsonObj?: boolean): object;
+ }

package/lib/model/tool-choice-required.js
@@ -0,0 +1,59 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolChoiceRequired = void 0;
+ const model = __importStar(require("../model"));
+ var ToolChoiceRequired;
+ (function (ToolChoiceRequired) {
+ function getJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.ToolChoice.getJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceRequired.getJsonObj = getJsonObj;
+ ToolChoiceRequired.type = "REQUIRED";
+ function getDeserializedJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+ ? obj
+ : model.ToolChoice.getDeserializedJsonObj(obj))), {});
+ return jsonObj;
+ }
+ ToolChoiceRequired.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolChoiceRequired = exports.ToolChoiceRequired || (exports.ToolChoiceRequired = {}));
+ //# sourceMappingURL=tool-choice-required.js.map

package/lib/model/tool-choice-required.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"tool-choice-required.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/tool-choice-required.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAUlC,IAAiB,kBAAkB,CAuBlC;AAvBD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GAClF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,6BAAU,aAOzB,CAAA;IACY,uBAAI,GAAG,UAAU,CAAC;IAC/B,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,UAAU,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GACtE,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,yCAAsB,yBAYrC,CAAA;AACH,CAAC,EAvBgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAuBlC"}

package/lib/model/tool-choice.d.ts
@@ -0,0 +1,29 @@
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ /**
+ * The tool choice for a tool.
+ */
+ export interface ToolChoice {
+ "type": string;
+ }
+ export declare namespace ToolChoice {
+ function getJsonObj(obj: ToolChoice): object;
+ function getDeserializedJsonObj(obj: ToolChoice): object;
+ }

package/lib/model/tool-choice.js
@@ -0,0 +1,87 @@
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/EN/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/EN/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/EN/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/EN/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](#/EN/generative-ai/latest/) to [create a custom model](#/EN/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/EN/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](#/EN/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ToolChoice = void 0;
+ const model = __importStar(require("../model"));
+ const common = require("oci-common");
+ var ToolChoice;
+ (function (ToolChoice) {
+ function getJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {});
+ if (obj && "type" in obj && obj.type) {
+ switch (obj.type) {
+ case "FUNCTION":
+ return model.ToolChoiceFunction.getJsonObj(jsonObj, true);
+ case "NONE":
+ return model.ToolChoiceNone.getJsonObj(jsonObj, true);
+ case "AUTO":
+ return model.ToolChoiceAuto.getJsonObj(jsonObj, true);
+ case "REQUIRED":
+ return model.ToolChoiceRequired.getJsonObj(jsonObj, true);
+ default:
+ if (common.LOG.logger)
+ common.LOG.logger.info(`Unknown value for: ${obj.type}`);
+ }
+ }
+ return jsonObj;
+ }
+ ToolChoice.getJsonObj = getJsonObj;
+ function getDeserializedJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {});
+ if (obj && "type" in obj && obj.type) {
+ switch (obj.type) {
+ case "FUNCTION":
+ return model.ToolChoiceFunction.getDeserializedJsonObj(jsonObj, true);
+ case "NONE":
+ return model.ToolChoiceNone.getDeserializedJsonObj(jsonObj, true);
+ case "AUTO":
+ return model.ToolChoiceAuto.getDeserializedJsonObj(jsonObj, true);
+ case "REQUIRED":
+ return model.ToolChoiceRequired.getDeserializedJsonObj(jsonObj, true);
+ default:
+ if (common.LOG.logger)
+ common.LOG.logger.info(`Unknown value for: ${obj.type}`);
+ }
+ }
+ return jsonObj;
+ }
+ ToolChoice.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(ToolChoice = exports.ToolChoice || (exports.ToolChoice = {}));
+ //# sourceMappingURL=tool-choice.js.map
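
To close the loop on the ToolChoice hierarchy, a hedged sketch of how the four variants and their declared `type` constants fit together; again assuming the SDK's top-level `models` re-export, with "getWeather" as an illustrative function name.

```ts
import * as generativeaiinference from "oci-generativeaiinference";

const { ToolChoice, ToolChoiceAuto, ToolChoiceNone, ToolChoiceRequired, ToolChoiceFunction } =
  generativeaiinference.models;

// Each concrete variant extends ToolChoice and contributes a fixed discriminator:
// "AUTO", "NONE", "REQUIRED", or "FUNCTION".
const choices: generativeaiinference.models.ToolChoice[] = [
  { type: ToolChoiceAuto.type },
  { type: ToolChoiceNone.type },
  { type: ToolChoiceRequired.type },
  { type: ToolChoiceFunction.type, name: "getWeather" } as generativeaiinference.models.ToolChoiceFunction
];

// getJsonObj / getDeserializedJsonObj dispatch on `type`, as shown above;
// an unrecognized value only logs a message and passes the object through.
const serialized = choices.map(c => ToolChoice.getJsonObj(c));
console.log(serialized);
```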