oci-generativeaiinference 2.86.3 → 2.88.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/index.d.ts +1 -1
  2. package/index.js +1 -1
  3. package/lib/client.d.ts +5 -5
  4. package/lib/client.js +5 -5
  5. package/lib/model/assistant-message.d.ts +35 -0
  6. package/lib/model/assistant-message.js +57 -0
  7. package/lib/model/assistant-message.js.map +1 -0
  8. package/lib/model/base-chat-request.d.ts +2 -2
  9. package/lib/model/base-chat-request.js +1 -1
  10. package/lib/model/base-chat-response.d.ts +2 -2
  11. package/lib/model/base-chat-response.js +1 -1
  12. package/lib/model/chat-choice.d.ts +2 -2
  13. package/lib/model/chat-choice.js +1 -1
  14. package/lib/model/chat-content.d.ts +1 -1
  15. package/lib/model/chat-content.js +1 -1
  16. package/lib/model/chat-details.d.ts +3 -3
  17. package/lib/model/chat-details.js +1 -1
  18. package/lib/model/chat-result.d.ts +2 -2
  19. package/lib/model/chat-result.js +1 -1
  20. package/lib/model/choice.d.ts +2 -2
  21. package/lib/model/choice.js +1 -1
  22. package/lib/model/citation.d.ts +6 -6
  23. package/lib/model/citation.js +1 -1
  24. package/lib/model/cohere-chat-bot-message.d.ts +39 -0
  25. package/lib/model/cohere-chat-bot-message.js +71 -0
  26. package/lib/model/cohere-chat-bot-message.js.map +1 -0
  27. package/lib/model/cohere-chat-request.d.ts +74 -18
  28. package/lib/model/cohere-chat-request.js +31 -1
  29. package/lib/model/cohere-chat-request.js.map +1 -1
  30. package/lib/model/cohere-chat-response.d.ts +39 -10
  31. package/lib/model/cohere-chat-response.js +21 -1
  32. package/lib/model/cohere-chat-response.js.map +1 -1
  33. package/lib/model/cohere-llm-inference-request.d.ts +1 -1
  34. package/lib/model/cohere-llm-inference-request.js +1 -1
  35. package/lib/model/cohere-llm-inference-response.d.ts +1 -1
  36. package/lib/model/cohere-llm-inference-response.js +1 -1
  37. package/lib/model/cohere-message.d.ts +3 -14
  38. package/lib/model/cohere-message.js +52 -6
  39. package/lib/model/cohere-message.js.map +1 -1
  40. package/lib/model/cohere-parameter-definition.d.ts +40 -0
  41. package/lib/model/cohere-parameter-definition.js +36 -0
  42. package/lib/model/cohere-parameter-definition.js.map +1 -0
  43. package/lib/model/cohere-system-message.d.ts +35 -0
  44. package/lib/model/cohere-system-message.js +59 -0
  45. package/lib/model/cohere-system-message.js.map +1 -0
  46. package/lib/model/cohere-tool-call.d.ts +36 -0
  47. package/lib/model/cohere-tool-call.js +36 -0
  48. package/lib/model/cohere-tool-call.js.map +1 -0
  49. package/lib/model/cohere-tool-message.d.ts +35 -0
  50. package/lib/model/cohere-tool-message.js +71 -0
  51. package/lib/model/cohere-tool-message.js.map +1 -0
  52. package/lib/model/cohere-tool-result.d.ts +34 -0
  53. package/lib/model/cohere-tool-result.js +60 -0
  54. package/lib/model/cohere-tool-result.js.map +1 -0
  55. package/lib/model/cohere-tool.d.ts +43 -0
  56. package/lib/model/cohere-tool.js +65 -0
  57. package/lib/model/cohere-tool.js.map +1 -0
  58. package/lib/model/cohere-user-message.d.ts +35 -0
  59. package/lib/model/cohere-user-message.js +59 -0
  60. package/lib/model/cohere-user-message.js.map +1 -0
  61. package/lib/model/dedicated-serving-mode.d.ts +1 -1
  62. package/lib/model/dedicated-serving-mode.js +1 -1
  63. package/lib/model/embed-text-details.d.ts +3 -3
  64. package/lib/model/embed-text-details.js +1 -1
  65. package/lib/model/embed-text-result.d.ts +1 -1
  66. package/lib/model/embed-text-result.js +1 -1
  67. package/lib/model/generate-text-details.d.ts +2 -2
  68. package/lib/model/generate-text-details.js +1 -1
  69. package/lib/model/generate-text-result.d.ts +1 -1
  70. package/lib/model/generate-text-result.js +1 -1
  71. package/lib/model/generated-text.d.ts +1 -1
  72. package/lib/model/generated-text.js +1 -1
  73. package/lib/model/generic-chat-request.d.ts +12 -7
  74. package/lib/model/generic-chat-request.js +1 -1
  75. package/lib/model/generic-chat-request.js.map +1 -1
  76. package/lib/model/generic-chat-response.d.ts +3 -3
  77. package/lib/model/generic-chat-response.js +1 -1
  78. package/lib/model/index.d.ts +23 -1
  79. package/lib/model/index.js +24 -2
  80. package/lib/model/index.js.map +1 -1
  81. package/lib/model/llama-llm-inference-request.d.ts +2 -2
  82. package/lib/model/llama-llm-inference-request.js +1 -1
  83. package/lib/model/llama-llm-inference-response.d.ts +1 -1
  84. package/lib/model/llama-llm-inference-response.js +1 -1
  85. package/lib/model/llm-inference-request.d.ts +1 -1
  86. package/lib/model/llm-inference-request.js +1 -1
  87. package/lib/model/llm-inference-response.d.ts +1 -1
  88. package/lib/model/llm-inference-response.js +1 -1
  89. package/lib/model/logprobs.d.ts +6 -3
  90. package/lib/model/logprobs.js +1 -1
  91. package/lib/model/logprobs.js.map +1 -1
  92. package/lib/model/message.d.ts +4 -7
  93. package/lib/model/message.js +28 -1
  94. package/lib/model/message.js.map +1 -1
  95. package/lib/model/on-demand-serving-mode.d.ts +2 -2
  96. package/lib/model/on-demand-serving-mode.js +1 -1
  97. package/lib/model/search-query.d.ts +1 -1
  98. package/lib/model/search-query.js +1 -1
  99. package/lib/model/serving-mode.d.ts +2 -2
  100. package/lib/model/serving-mode.js +1 -1
  101. package/lib/model/summarize-text-details.d.ts +2 -2
  102. package/lib/model/summarize-text-details.js +1 -1
  103. package/lib/model/summarize-text-result.d.ts +1 -1
  104. package/lib/model/summarize-text-result.js +1 -1
  105. package/lib/model/system-message.d.ts +35 -0
  106. package/lib/model/system-message.js +57 -0
  107. package/lib/model/system-message.js.map +1 -0
  108. package/lib/model/text-content.d.ts +2 -2
  109. package/lib/model/text-content.js +1 -1
  110. package/lib/model/token-likelihood.d.ts +1 -1
  111. package/lib/model/token-likelihood.js +1 -1
  112. package/lib/model/user-message.d.ts +35 -0
  113. package/lib/model/user-message.js +57 -0
  114. package/lib/model/user-message.js.map +1 -0
  115. package/lib/request/chat-request.d.ts +3 -4
  116. package/lib/request/embed-text-request.d.ts +3 -4
  117. package/lib/request/generate-text-request.d.ts +3 -4
  118. package/lib/request/index.d.ts +1 -1
  119. package/lib/request/index.js +1 -1
  120. package/lib/request/summarize-text-request.d.ts +3 -4
  121. package/lib/response/index.d.ts +1 -1
  122. package/lib/response/index.js +1 -1
  123. package/package.json +3 -3
@@ -0,0 +1,65 @@ package/lib/model/cohere-tool.js
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CohereTool = void 0;
+ const model = __importStar(require("../model"));
+ const common = require("oci-common");
+ var CohereTool;
+ (function (CohereTool) {
+ function getJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {
+ "parameterDefinitions": obj.parameterDefinitions
+ ? common.mapContainer(obj.parameterDefinitions, model.CohereParameterDefinition.getJsonObj)
+ : undefined
+ });
+ return jsonObj;
+ }
+ CohereTool.getJsonObj = getJsonObj;
+ function getDeserializedJsonObj(obj) {
+ const jsonObj = Object.assign(Object.assign({}, obj), {
+ "parameterDefinitions": obj.parameterDefinitions
+ ? common.mapContainer(obj.parameterDefinitions, model.CohereParameterDefinition.getDeserializedJsonObj)
+ : undefined
+ });
+ return jsonObj;
+ }
+ CohereTool.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(CohereTool = exports.CohereTool || (exports.CohereTool = {}));
+ //# sourceMappingURL=cohere-tool.js.map
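For orientation, a hedged sketch of how the new CohereTool model might be built and serialized with the getJsonObj helper above. Only parameterDefinitions and the two serializers are visible in this hunk; the name and description fields, and the CohereParameterDefinition shape (description, type, isRequired), follow Cohere's tool schema and are assumptions, not something this diff confirms.

```typescript
import * as genai from "oci-generativeaiinference";

// A hypothetical single-parameter tool. Field names other than
// parameterDefinitions are assumed from Cohere's tool schema.
const weatherTool: genai.models.CohereTool = {
  name: "getWeather",
  description: "Returns the current weather for a given city.",
  parameterDefinitions: {
    city: {
      description: "The city to look up.",
      type: "str",
      isRequired: true
    }
  }
};

// CohereTool.getJsonObj (above) copies the object and maps each entry of
// parameterDefinitions through CohereParameterDefinition.getJsonObj.
const wireForm = genai.models.CohereTool.getJsonObj(weatherTool);
```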
@@ -0,0 +1 @@ package/lib/model/cohere-tool.js.map
+ {"version":3,"file":"cohere-tool.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-tool.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AAoBtC,IAAiB,UAAU,CA+B1B;AA/BD,WAAiB,UAAU;IACzB,SAAgB,UAAU,CAAC,GAAe;QACxC,MAAM,OAAO,mCACR,GAAG,GACH;YACD,sBAAsB,EAAE,GAAG,CAAC,oBAAoB;gBAC9C,CAAC,CAAC,MAAM,CAAC,YAAY,CACjB,GAAG,CAAC,oBAAoB,EACxB,KAAK,CAAC,yBAAyB,CAAC,UAAU,CAC3C;gBACH,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAde,qBAAU,aAczB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAe;QACpD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,sBAAsB,EAAE,GAAG,CAAC,oBAAoB;gBAC9C,CAAC,CAAC,MAAM,CAAC,YAAY,CACjB,GAAG,CAAC,oBAAoB,EACxB,KAAK,CAAC,yBAAyB,CAAC,sBAAsB,CACvD;gBACH,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAde,iCAAsB,yBAcrC,CAAA;AACH,CAAC,EA/BgB,UAAU,GAAV,kBAAU,KAAV,kBAAU,QA+B1B"}
@@ -0,0 +1,35 @@ package/lib/model/cohere-user-message.d.ts
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ import * as model from "../model";
+ /**
+ * A message that represents a single chat dialog as USER role.
+ */
+ export interface CohereUserMessage extends model.CohereMessage {
+ /**
+ * Contents of the chat message.
+ */
+ "message": string;
+ "role": string;
+ }
+ export declare namespace CohereUserMessage {
+ function getJsonObj(obj: CohereUserMessage, isParentJsonObj?: boolean): object;
+ const role = "USER";
+ function getDeserializedJsonObj(obj: CohereUserMessage, isParentJsonObj?: boolean): object;
+ }
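A minimal usage sketch for the new type: the role discriminator is pinned to "USER" by the constant declared above. Instances of this shape would populate the chat history of a CohereChatRequest; that field name is an assumption based on the Cohere request model, which is not shown in full in this excerpt.

```typescript
import * as genai from "oci-generativeaiinference";

// One USER turn; the `role` constant comes from the namespace above.
const userTurn: genai.models.CohereUserMessage = {
  role: genai.models.CohereUserMessage.role, // "USER"
  message: "What is the capital of France?"
};
```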
@@ -0,0 +1,59 @@ package/lib/model/cohere-user-message.js
+ "use strict";
+ /**
+ * Generative AI Service Inference API
+ * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
+
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+
+ To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).
+
+ To learn more about the service, see the [Generative AI documentation](/iaas/Content/generative-ai/home.htm).
+
+ * OpenAPI spec version: 20231130
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CohereUserMessage = void 0;
+ const model = __importStar(require("../model"));
+ var CohereUserMessage;
+ (function (CohereUserMessage) {
+ function getJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj ? obj : model.CohereMessage.getJsonObj(obj))), {});
+ return jsonObj;
+ }
+ CohereUserMessage.getJsonObj = getJsonObj;
+ CohereUserMessage.role = "USER";
+ function getDeserializedJsonObj(obj, isParentJsonObj) {
+ const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+ ? obj
+ : model.CohereMessage.getDeserializedJsonObj(obj))), {});
+ return jsonObj;
+ }
+ CohereUserMessage.getDeserializedJsonObj = getDeserializedJsonObj;
+ })(CohereUserMessage = exports.CohereUserMessage || (exports.CohereUserMessage = {}));
+ //# sourceMappingURL=cohere-user-message.js.map
@@ -0,0 +1 @@ package/lib/model/cohere-user-message.js.map
+ {"version":3,"file":"cohere-user-message.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-user-message.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAelC,IAAiB,iBAAiB,CAuBjC;AAvBD,WAAiB,iBAAiB;IAChC,SAAgB,UAAU,CAAC,GAAsB,EAAE,eAAyB;QAC1E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,UAAU,CAAC,GAAG,CAAuB,CAAC,GACpF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAPe,4BAAU,aAOzB,CAAA;IACY,sBAAI,GAAG,MAAM,CAAC;IAC3B,SAAgB,sBAAsB,CACpC,GAAsB,EACtB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,aAAa,CAAC,sBAAsB,CAAC,GAAG,CAAuB,CAAC,GACxE,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,wCAAsB,yBAYrC,CAAA;AACH,CAAC,EAvBgB,iBAAiB,GAAjB,yBAAiB,KAAjB,yBAAiB,QAuBjC"}
@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -23,12 +23,12 @@ import * as model from "../model";
  */
  export interface EmbedTextDetails {
  /**
- * Provide a list of strings with a maximum number of 96 entries. Each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens.
+ * Provide a list of strings. Each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens.
  */
  "inputs": Array<string>;
  "servingMode": model.DedicatedServingMode | model.OnDemandServingMode;
  /**
- * The OCID of compartment that the user is authorized to use to call into the Generative AI service.
+ * The OCID of compartment in which to call the Generative AI service to create text embeddings.
  */
  "compartmentId": string;
  /**
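A sketch of a request body that matches the interface above. The compartment OCID is a placeholder and the on-demand embed model ID is an assumption; only inputs, servingMode, and compartmentId appear in this excerpt.

```typescript
import * as genai from "oci-generativeaiinference";

// Each input string may be up to 512 tokens, per the doc comment above.
const embedDetails: genai.models.EmbedTextDetails = {
  inputs: ["hello world", "a second phrase to embed"],
  servingMode: {
    servingType: "ON_DEMAND", // discriminator for OnDemandServingMode
    modelId: "cohere.embed-english-v3.0" // placeholder model ID
  } as genai.models.OnDemandServingMode,
  compartmentId: "ocid1.compartment.oc1..exampleuniqueid"
};
```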
@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -23,7 +23,7 @@ import * as model from "../model";
  */
  export interface GenerateTextDetails {
  /**
- * The OCID of compartment that the user is authorized to use to call into the Generative AI service.
+ * The OCID of compartment in which to call the Generative AI service to generate text.
  */
  "compartmentId": string;
  "servingMode": model.DedicatedServingMode | model.OnDemandServingMode;
@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -23,11 +23,11 @@ import * as model from "../model";
  */
  export interface GenericChatRequest extends model.BaseChatRequest {
  /**
- * The series of messages associated with this chat completion request. It should include previous messages in the conversation. Each message has a role and content.
+ * The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role ({@code USER} or the {@code CHATBOT}) and content.
  */
  "messages"?: Array<model.Message>;
  /**
- * Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
+ * Whether to stream back partial progress. If set to true, as tokens become available, they are sent as data-only server-sent events.
  */
  "isStream"?: boolean;
  /**
@@ -35,7 +35,7 @@ export interface GenericChatRequest extends model.BaseChatRequest {
  */
  "numGenerations"?: number;
  /**
- * Whether or not to return the user prompt in the response. Applies only to non-stream results.
+ * Whether to include the user prompt in the response. Applies only to non-stream results.
  */
  "isEcho"?: boolean;
  /**
@@ -82,12 +82,17 @@ export interface GenericChatRequest extends model.BaseChatRequest {
  */
  "logProbs"?: number;
  /**
- * The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus max_tokens cannot exceed the model's context length. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus {@code maxTokens} must not exceed the model's context length.
+ * Not setting a value for maxTokens results in the possible use of model's full context length.
+ * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "maxTokens"?: number;
  /**
- * Modify the likelihood of specified tokens appearing in the completion.
- */
+ * Modifies the likelihood of specified tokens that appear in the completion.
+ * <p>
+ Example: '{\"6395\": 2, \"8134\": 1, \"21943\": 0.5, \"5923\": -100}'
+ *
+ */
  "logitBias"?: any;
  "apiFormat": string;
  }
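To make the reworded maxTokens and logitBias doc comments concrete, a hedged sketch of a generic-format request. The UserMessage and TextContent shapes come from the new model files in the list above; the token IDs mirror the logitBias doc example, and "GENERIC" as the apiFormat discriminator is an assumption consistent with the SDK's naming rather than something shown in this excerpt.

```typescript
import * as genai from "oci-generativeaiinference";

// Prompt tokens + maxTokens must fit within the model's context length;
// omitting maxTokens may allow use of the model's full context length.
const chatRequest: genai.models.GenericChatRequest = {
  apiFormat: "GENERIC", // assumed discriminator for the generic (non-Cohere) format
  messages: [
    {
      role: "USER",
      content: [
        { type: "TEXT", text: "Write a haiku about the ocean." } as genai.models.TextContent
      ]
    } as genai.models.UserMessage
  ],
  isStream: false,
  maxTokens: 256,
  // Token ID -> bias, as in the doc example above; IDs are model-specific.
  logitBias: { "6395": 2, "8134": 1, "21943": 0.5, "5923": -100 }
};
```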
@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -1 +1 @@
- {"version":3,"file":"generic-chat-request.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/generic-chat-request.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AA8ElC,IAAiB,kBAAkB,CAmClC;AAnCD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GACvF;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACxC,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,6BAAU,aAazB,CAAA;IACY,4BAAS,GAAG,SAAS,CAAC;IACnC,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GAC3E;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACpD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAlBe,yCAAsB,yBAkBrC,CAAA;AACH,CAAC,EAnCgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAmClC"}
+ {"version":3,"file":"generic-chat-request.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/generic-chat-request.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAmFlC,IAAiB,kBAAkB,CAmClC;AAnCD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB,EAAE,eAAyB;QAC3E,MAAM,OAAO,mCACR,CAAC,eAAe,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,UAAU,CAAC,GAAG,CAAwB,CAAC,GACvF;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACxC,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,6BAAU,aAazB,CAAA;IACY,4BAAS,GAAG,SAAS,CAAC;IACnC,SAAgB,sBAAsB,CACpC,GAAuB,EACvB,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,eAAe,CAAC,sBAAsB,CAAC,GAAG,CAAwB,CAAC,GAC3E;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,OAAO,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACpD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAlBe,yCAAsB,yBAkBrC,CAAA;AACH,CAAC,EAnCgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAmClC"}
@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -19,11 +19,11 @@ To learn more about the service, see the [Generative AI documentation](/iaas/Con
  */
  import * as model from "../model";
  /**
- * The response to the chat conversation.
+ * The response for a chat conversation.
  */
  export interface GenericChatResponse extends model.BaseChatResponse {
  /**
- * The Unix timestamp (in seconds) of when the generation was created.
+ * The Unix timestamp (in seconds) of when the response text was generated.
  */
  "timeCreated": Date;
  /**
@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -2,7 +2,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -35,6 +35,14 @@ import * as Citation from "./citation";
  export import Citation = Citation.Citation;
  import * as CohereMessage from "./cohere-message";
  export import CohereMessage = CohereMessage.CohereMessage;
+ import * as CohereParameterDefinition from "./cohere-parameter-definition";
+ export import CohereParameterDefinition = CohereParameterDefinition.CohereParameterDefinition;
+ import * as CohereTool from "./cohere-tool";
+ export import CohereTool = CohereTool.CohereTool;
+ import * as CohereToolCall from "./cohere-tool-call";
+ export import CohereToolCall = CohereToolCall.CohereToolCall;
+ import * as CohereToolResult from "./cohere-tool-result";
+ export import CohereToolResult = CohereToolResult.CohereToolResult;
  import * as EmbedTextDetails from "./embed-text-details";
  export import EmbedTextDetails = EmbedTextDetails.EmbedTextDetails;
  import * as EmbedTextResult from "./embed-text-result";
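The four models added above (CohereParameterDefinition, CohereTool, CohereToolCall, CohereToolResult) bring Cohere-style tool calling into the chat API. A sketch of declaring a tool follows; the field names are read from Cohere's tool-use convention and should be checked against the new cohere-tool.d.ts and cohere-parameter-definition.d.ts in this diff.

```ts
import * as genai from "oci-generativeaiinference";

// Hypothetical weather-lookup tool the model may choose to call.
const weatherTool: genai.models.CohereTool = {
  name: "get_weather",
  description: "Returns the current weather for a city.",
  parameterDefinitions: {
    // Cohere parameter types are Python-style names such as "str".
    city: {
      description: "Name of the city, e.g. 'Lisbon'.",
      type: "str",
      isRequired: true // assumed spelling; confirm in the typings
    }
  }
};
```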
@@ -63,6 +71,10 @@ import * as SummarizeTextResult from "./summarize-text-result";
  export import SummarizeTextResult = SummarizeTextResult.SummarizeTextResult;
  import * as TokenLikelihood from "./token-likelihood";
  export import TokenLikelihood = TokenLikelihood.TokenLikelihood;
+ import * as AssistantMessage from "./assistant-message";
+ export import AssistantMessage = AssistantMessage.AssistantMessage;
+ import * as CohereChatBotMessage from "./cohere-chat-bot-message";
+ export import CohereChatBotMessage = CohereChatBotMessage.CohereChatBotMessage;
  import * as CohereChatRequest from "./cohere-chat-request";
  export import CohereChatRequest = CohereChatRequest.CohereChatRequest;
  import * as CohereChatResponse from "./cohere-chat-response";
@@ -71,6 +83,12 @@ import * as CohereLlmInferenceRequest from "./cohere-llm-inference-request";
  export import CohereLlmInferenceRequest = CohereLlmInferenceRequest.CohereLlmInferenceRequest;
  import * as CohereLlmInferenceResponse from "./cohere-llm-inference-response";
  export import CohereLlmInferenceResponse = CohereLlmInferenceResponse.CohereLlmInferenceResponse;
+ import * as CohereSystemMessage from "./cohere-system-message";
+ export import CohereSystemMessage = CohereSystemMessage.CohereSystemMessage;
+ import * as CohereToolMessage from "./cohere-tool-message";
+ export import CohereToolMessage = CohereToolMessage.CohereToolMessage;
+ import * as CohereUserMessage from "./cohere-user-message";
+ export import CohereUserMessage = CohereUserMessage.CohereUserMessage;
  import * as DedicatedServingMode from "./dedicated-serving-mode";
  export import DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
  import * as GenericChatRequest from "./generic-chat-request";
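CohereSystemMessage, CohereToolMessage, and CohereUserMessage (together with CohereChatBotMessage from the earlier hunk) are role-specific subtypes of the now-discriminated CohereMessage. A sketch of a chatHistory built from them, assuming the role discriminator values "SYSTEM", "USER", and "CHATBOT" as in Cohere's API; confirm against cohere-message.d.ts.

```ts
import * as genai from "oci-generativeaiinference";

const system: genai.models.CohereSystemMessage = {
  role: "SYSTEM",
  message: "You are a terse assistant."
};
const user: genai.models.CohereUserMessage = {
  role: "USER",
  message: "What is a dedicated AI cluster?"
};
const bot: genai.models.CohereChatBotMessage = {
  role: "CHATBOT",
  message: "Reserved GPU capacity for fine-tuning or hosting models."
};

// Passed as chatHistory on a CohereChatRequest to continue the conversation.
const chatHistory: genai.models.CohereMessage[] = [system, user, bot];
```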
@@ -83,5 +101,9 @@ import * as LlamaLlmInferenceResponse from "./llama-llm-inference-response";
  export import LlamaLlmInferenceResponse = LlamaLlmInferenceResponse.LlamaLlmInferenceResponse;
  import * as OnDemandServingMode from "./on-demand-serving-mode";
  export import OnDemandServingMode = OnDemandServingMode.OnDemandServingMode;
+ import * as SystemMessage from "./system-message";
+ export import SystemMessage = SystemMessage.SystemMessage;
  import * as TextContent from "./text-content";
  export import TextContent = TextContent.TextContent;
+ import * as UserMessage from "./user-message";
+ export import UserMessage = UserMessage.UserMessage;
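On the generic (non-Cohere) side, SystemMessage and UserMessage join AssistantMessage as role-specific subtypes of Message, with content carried as a list of TextContent parts. A sketch under those assumptions; the role values and the content array shape should be confirmed in the typings.

```ts
import * as genai from "oci-generativeaiinference";

const question: genai.models.UserMessage = {
  role: "USER", // assumed discriminator value
  content: [{ type: "TEXT", text: "List three OCI regions." }]
};

// A GENERIC-format chat request carrying the message list.
const request: genai.models.GenericChatRequest = {
  apiFormat: "GENERIC",
  messages: [question],
  maxTokens: 200,
  temperature: 0.2
};
```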
@@ -3,7 +3,7 @@
  * Generative AI Service Inference API
  * OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.

- Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).
+ Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to [chat](#/en/generative-ai-inference/latest/ChatResult/Chat), [generate text](#/en/generative-ai-inference/latest/GenerateTextResult/GenerateText), [summarize](#/en/generative-ai-inference/latest/SummarizeTextResult/SummarizeText), and [create text embeddings](#/en/generative-ai-inference/latest/EmbedTextResult/EmbedText).

  To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the [Generative AI service management API](/#/en/generative-ai/latest/) to [create a custom model](#/en/generative-ai/latest/Model/) by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a [fine-tuning dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/). Then, create a [hosting dedicated AI cluster](#/en/generative-ai/latest/DedicatedAiCluster/) with an [endpoint](#/en/generative-ai/latest/Endpoint/) to host your custom model. For resource management in the Generative AI service, use the [Generative AI service management API](/#/en/generative-ai/latest/).

@@ -38,7 +38,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
  return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.TextContent = exports.OnDemandServingMode = exports.LlamaLlmInferenceResponse = exports.LlamaLlmInferenceRequest = exports.GenericChatResponse = exports.GenericChatRequest = exports.DedicatedServingMode = exports.CohereLlmInferenceResponse = exports.CohereLlmInferenceRequest = exports.CohereChatResponse = exports.CohereChatRequest = exports.TokenLikelihood = exports.SummarizeTextResult = exports.SummarizeTextDetails = exports.ServingMode = exports.SearchQuery = exports.Message = exports.Logprobs = exports.LlmInferenceResponse = exports.LlmInferenceRequest = exports.GeneratedText = exports.GenerateTextResult = exports.GenerateTextDetails = exports.EmbedTextResult = exports.EmbedTextDetails = exports.CohereMessage = exports.Citation = exports.Choice = exports.ChatResult = exports.ChatDetails = exports.ChatContent = exports.ChatChoice = exports.BaseChatResponse = exports.BaseChatRequest = void 0;
+ exports.UserMessage = exports.TextContent = exports.SystemMessage = exports.OnDemandServingMode = exports.LlamaLlmInferenceResponse = exports.LlamaLlmInferenceRequest = exports.GenericChatResponse = exports.GenericChatRequest = exports.DedicatedServingMode = exports.CohereUserMessage = exports.CohereToolMessage = exports.CohereSystemMessage = exports.CohereLlmInferenceResponse = exports.CohereLlmInferenceRequest = exports.CohereChatResponse = exports.CohereChatRequest = exports.CohereChatBotMessage = exports.AssistantMessage = exports.TokenLikelihood = exports.SummarizeTextResult = exports.SummarizeTextDetails = exports.ServingMode = exports.SearchQuery = exports.Message = exports.Logprobs = exports.LlmInferenceResponse = exports.LlmInferenceRequest = exports.GeneratedText = exports.GenerateTextResult = exports.GenerateTextDetails = exports.EmbedTextResult = exports.EmbedTextDetails = exports.CohereToolResult = exports.CohereToolCall = exports.CohereTool = exports.CohereParameterDefinition = exports.CohereMessage = exports.Citation = exports.Choice = exports.ChatResult = exports.ChatDetails = exports.ChatContent = exports.ChatChoice = exports.BaseChatResponse = exports.BaseChatRequest = void 0;
  const BaseChatRequest = __importStar(require("./base-chat-request"));
  exports.BaseChatRequest = BaseChatRequest.BaseChatRequest;
  const BaseChatResponse = __importStar(require("./base-chat-response"));
@@ -57,6 +57,14 @@ const Citation = __importStar(require("./citation"));
  exports.Citation = Citation.Citation;
  const CohereMessage = __importStar(require("./cohere-message"));
  exports.CohereMessage = CohereMessage.CohereMessage;
+ const CohereParameterDefinition = __importStar(require("./cohere-parameter-definition"));
+ exports.CohereParameterDefinition = CohereParameterDefinition.CohereParameterDefinition;
+ const CohereTool = __importStar(require("./cohere-tool"));
+ exports.CohereTool = CohereTool.CohereTool;
+ const CohereToolCall = __importStar(require("./cohere-tool-call"));
+ exports.CohereToolCall = CohereToolCall.CohereToolCall;
+ const CohereToolResult = __importStar(require("./cohere-tool-result"));
+ exports.CohereToolResult = CohereToolResult.CohereToolResult;
  const EmbedTextDetails = __importStar(require("./embed-text-details"));
  exports.EmbedTextDetails = EmbedTextDetails.EmbedTextDetails;
  const EmbedTextResult = __importStar(require("./embed-text-result"));
@@ -85,6 +93,10 @@ const SummarizeTextResult = __importStar(require("./summarize-text-result"));
  exports.SummarizeTextResult = SummarizeTextResult.SummarizeTextResult;
  const TokenLikelihood = __importStar(require("./token-likelihood"));
  exports.TokenLikelihood = TokenLikelihood.TokenLikelihood;
+ const AssistantMessage = __importStar(require("./assistant-message"));
+ exports.AssistantMessage = AssistantMessage.AssistantMessage;
+ const CohereChatBotMessage = __importStar(require("./cohere-chat-bot-message"));
+ exports.CohereChatBotMessage = CohereChatBotMessage.CohereChatBotMessage;
  const CohereChatRequest = __importStar(require("./cohere-chat-request"));
  exports.CohereChatRequest = CohereChatRequest.CohereChatRequest;
  const CohereChatResponse = __importStar(require("./cohere-chat-response"));
@@ -93,6 +105,12 @@ const CohereLlmInferenceRequest = __importStar(require("./cohere-llm-inference-r
  exports.CohereLlmInferenceRequest = CohereLlmInferenceRequest.CohereLlmInferenceRequest;
  const CohereLlmInferenceResponse = __importStar(require("./cohere-llm-inference-response"));
  exports.CohereLlmInferenceResponse = CohereLlmInferenceResponse.CohereLlmInferenceResponse;
+ const CohereSystemMessage = __importStar(require("./cohere-system-message"));
+ exports.CohereSystemMessage = CohereSystemMessage.CohereSystemMessage;
+ const CohereToolMessage = __importStar(require("./cohere-tool-message"));
+ exports.CohereToolMessage = CohereToolMessage.CohereToolMessage;
+ const CohereUserMessage = __importStar(require("./cohere-user-message"));
+ exports.CohereUserMessage = CohereUserMessage.CohereUserMessage;
  const DedicatedServingMode = __importStar(require("./dedicated-serving-mode"));
  exports.DedicatedServingMode = DedicatedServingMode.DedicatedServingMode;
  const GenericChatRequest = __importStar(require("./generic-chat-request"));
@@ -105,6 +123,10 @@ const LlamaLlmInferenceResponse = __importStar(require("./llama-llm-inference-re
  exports.LlamaLlmInferenceResponse = LlamaLlmInferenceResponse.LlamaLlmInferenceResponse;
  const OnDemandServingMode = __importStar(require("./on-demand-serving-mode"));
  exports.OnDemandServingMode = OnDemandServingMode.OnDemandServingMode;
+ const SystemMessage = __importStar(require("./system-message"));
+ exports.SystemMessage = SystemMessage.SystemMessage;
  const TextContent = __importStar(require("./text-content"));
  exports.TextContent = TextContent.TextContent;
+ const UserMessage = __importStar(require("./user-message"));
+ exports.UserMessage = UserMessage.UserMessage;
  //# sourceMappingURL=index.js.map
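index.d.ts and index.js make the same change twice over: the typings re-export each new model namespace with `export import`, and the compiled JavaScript mirrors that with `__importStar` assignments onto `exports`, so every model added in this release is reachable as `genai.models.*` from the package root. To close the loop on tool calling, here is a sketch of turning the model's CohereToolCall entries into CohereToolResult entries for the follow-up request; the call/outputs field names follow Cohere's convention and are assumptions to verify against cohere-tool-result.d.ts.

```ts
import * as genai from "oci-generativeaiinference";

// Map each tool call from a CohereChatResponse to a result for the next
// CohereChatRequest; `outputs` holds whatever objects your tool returned.
function buildToolResults(
  calls: genai.models.CohereToolCall[]
): genai.models.CohereToolResult[] {
  return calls.map((call) => ({
    call,
    outputs: [{ temperatureC: 21 }] // hypothetical tool output
  }));
}
```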