oci-generativeaiinference 2.122.2 → 2.123.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +1 -1
- package/index.js +1 -1
- package/lib/client.d.ts +1 -1
- package/lib/client.js +1 -1
- package/lib/model/annotation.d.ts +1 -1
- package/lib/model/annotation.js +1 -1
- package/lib/model/apply-guardrails-details.d.ts +1 -1
- package/lib/model/apply-guardrails-details.js +1 -1
- package/lib/model/apply-guardrails-result.d.ts +1 -1
- package/lib/model/apply-guardrails-result.js +1 -1
- package/lib/model/approximate-location.d.ts +1 -1
- package/lib/model/approximate-location.js +1 -1
- package/lib/model/assistant-message.d.ts +5 -1
- package/lib/model/assistant-message.js +1 -1
- package/lib/model/assistant-message.js.map +1 -1
- package/lib/model/audio-content.d.ts +1 -1
- package/lib/model/audio-content.js +1 -1
- package/lib/model/audio-url.d.ts +1 -1
- package/lib/model/audio-url.js +1 -1
- package/lib/model/base-chat-request.d.ts +1 -1
- package/lib/model/base-chat-request.js +5 -1
- package/lib/model/base-chat-request.js.map +1 -1
- package/lib/model/base-chat-response.d.ts +1 -1
- package/lib/model/base-chat-response.js +5 -1
- package/lib/model/base-chat-response.js.map +1 -1
- package/lib/model/category-score.d.ts +1 -1
- package/lib/model/category-score.js +1 -1
- package/lib/model/chat-choice.d.ts +1 -1
- package/lib/model/chat-choice.js +1 -1
- package/lib/model/chat-content.d.ts +1 -1
- package/lib/model/chat-content.js +5 -1
- package/lib/model/chat-content.js.map +1 -1
- package/lib/model/chat-details.d.ts +2 -2
- package/lib/model/chat-details.js +1 -1
- package/lib/model/chat-result.d.ts +2 -2
- package/lib/model/chat-result.js +1 -1
- package/lib/model/choice.d.ts +1 -1
- package/lib/model/choice.js +1 -1
- package/lib/model/citation-options-v2.d.ts +38 -0
- package/lib/model/citation-options-v2.js +42 -0
- package/lib/model/citation-options-v2.js.map +1 -0
- package/lib/model/citation.d.ts +1 -1
- package/lib/model/citation.js +1 -1
- package/lib/model/cohere-assistant-message-v2.d.ts +43 -0
- package/lib/model/cohere-assistant-message-v2.js +83 -0
- package/lib/model/cohere-assistant-message-v2.js.map +1 -0
- package/lib/model/cohere-chat-bot-message.d.ts +1 -1
- package/lib/model/cohere-chat-bot-message.js +1 -1
- package/lib/model/cohere-chat-request-v2.d.ts +142 -0
- package/lib/model/cohere-chat-request-v2.js +114 -0
- package/lib/model/cohere-chat-request-v2.js.map +1 -0
- package/lib/model/cohere-chat-request.d.ts +1 -1
- package/lib/model/cohere-chat-request.js +1 -1
- package/lib/model/cohere-chat-response-v2.d.ts +61 -0
- package/lib/model/cohere-chat-response-v2.js +90 -0
- package/lib/model/cohere-chat-response-v2.js.map +1 -0
- package/lib/model/cohere-chat-response.d.ts +1 -1
- package/lib/model/cohere-chat-response.js +1 -1
- package/lib/model/cohere-citation-document.d.ts +36 -0
- package/lib/model/cohere-citation-document.js +36 -0
- package/lib/model/cohere-citation-document.js.map +1 -0
- package/lib/model/cohere-citation-source-v2.d.ts +44 -0
- package/lib/model/cohere-citation-source-v2.js +74 -0
- package/lib/model/cohere-citation-source-v2.js.map +1 -0
- package/lib/model/cohere-citation-tool-v2.d.ts +36 -0
- package/lib/model/cohere-citation-tool-v2.js +36 -0
- package/lib/model/cohere-citation-tool-v2.js.map +1 -0
- package/lib/model/cohere-citation-v2.d.ts +63 -0
- package/lib/model/cohere-citation-v2.js +79 -0
- package/lib/model/cohere-citation-v2.js.map +1 -0
- package/lib/model/cohere-content-v2.d.ts +29 -0
- package/lib/model/cohere-content-v2.js +87 -0
- package/lib/model/cohere-content-v2.js.map +1 -0
- package/lib/model/cohere-document-content-v2.d.ts +35 -0
- package/lib/model/cohere-document-content-v2.js +61 -0
- package/lib/model/cohere-document-content-v2.js.map +1 -0
- package/lib/model/cohere-image-content-v2.d.ts +32 -0
- package/lib/model/cohere-image-content-v2.js +65 -0
- package/lib/model/cohere-image-content-v2.js.map +1 -0
- package/lib/model/cohere-image-url-v2.d.ts +46 -0
- package/lib/model/cohere-image-url-v2.js +47 -0
- package/lib/model/cohere-image-url-v2.js.map +1 -0
- package/lib/model/cohere-llm-inference-request.d.ts +1 -1
- package/lib/model/cohere-llm-inference-request.js +1 -1
- package/lib/model/cohere-llm-inference-response.d.ts +1 -1
- package/lib/model/cohere-llm-inference-response.js +1 -1
- package/lib/model/cohere-message-v2.d.ts +34 -0
- package/lib/model/cohere-message-v2.js +99 -0
- package/lib/model/cohere-message-v2.js.map +1 -0
- package/lib/model/cohere-message.d.ts +1 -1
- package/lib/model/cohere-message.js +1 -1
- package/lib/model/cohere-parameter-definition.d.ts +1 -1
- package/lib/model/cohere-parameter-definition.js +1 -1
- package/lib/model/cohere-response-format.d.ts +1 -1
- package/lib/model/cohere-response-format.js +1 -1
- package/lib/model/cohere-response-json-format.d.ts +1 -1
- package/lib/model/cohere-response-json-format.js +1 -1
- package/lib/model/cohere-response-text-format.d.ts +1 -1
- package/lib/model/cohere-response-text-format.js +1 -1
- package/lib/model/cohere-system-message-v2.d.ts +31 -0
- package/lib/model/cohere-system-message-v2.js +59 -0
- package/lib/model/cohere-system-message-v2.js.map +1 -0
- package/lib/model/cohere-system-message.d.ts +1 -1
- package/lib/model/cohere-system-message.js +1 -1
- package/lib/model/cohere-text-content-v2.d.ts +35 -0
- package/lib/model/cohere-text-content-v2.js +59 -0
- package/lib/model/cohere-text-content-v2.js.map +1 -0
- package/lib/model/cohere-thinking-content-v2.d.ts +35 -0
- package/lib/model/cohere-thinking-content-v2.js +61 -0
- package/lib/model/cohere-thinking-content-v2.js.map +1 -0
- package/lib/model/cohere-thinking-v2.d.ts +40 -0
- package/lib/model/cohere-thinking-v2.js +41 -0
- package/lib/model/cohere-thinking-v2.js.map +1 -0
- package/lib/model/cohere-tool-call-v2.d.ts +48 -0
- package/lib/model/cohere-tool-call-v2.js +45 -0
- package/lib/model/cohere-tool-call-v2.js.map +1 -0
- package/lib/model/cohere-tool-call.d.ts +1 -1
- package/lib/model/cohere-tool-call.js +1 -1
- package/lib/model/cohere-tool-message-v2.d.ts +35 -0
- package/lib/model/cohere-tool-message-v2.js +59 -0
- package/lib/model/cohere-tool-message-v2.js.map +1 -0
- package/lib/model/cohere-tool-message.d.ts +1 -1
- package/lib/model/cohere-tool-message.js +1 -1
- package/lib/model/cohere-tool-result.d.ts +1 -1
- package/lib/model/cohere-tool-result.js +1 -1
- package/lib/model/cohere-tool-v2.d.ts +37 -0
- package/lib/model/cohere-tool-v2.js +64 -0
- package/lib/model/cohere-tool-v2.js.map +1 -0
- package/lib/model/cohere-tool.d.ts +1 -1
- package/lib/model/cohere-tool.js +1 -1
- package/lib/model/cohere-user-message-v2.d.ts +31 -0
- package/lib/model/cohere-user-message-v2.js +59 -0
- package/lib/model/cohere-user-message-v2.js.map +1 -0
- package/lib/model/cohere-user-message.d.ts +1 -1
- package/lib/model/cohere-user-message.js +1 -1
- package/lib/model/completion-tokens-details.d.ts +1 -1
- package/lib/model/completion-tokens-details.js +1 -1
- package/lib/model/content-moderation-configuration.d.ts +1 -1
- package/lib/model/content-moderation-configuration.js +1 -1
- package/lib/model/content-moderation-result.d.ts +1 -1
- package/lib/model/content-moderation-result.js +1 -1
- package/lib/model/dedicated-serving-mode.d.ts +1 -1
- package/lib/model/dedicated-serving-mode.js +1 -1
- package/lib/model/developer-message.d.ts +1 -1
- package/lib/model/developer-message.js +1 -1
- package/lib/model/document-content.d.ts +32 -0
- package/lib/model/document-content.js +65 -0
- package/lib/model/document-content.js.map +1 -0
- package/lib/model/document-rank.d.ts +1 -1
- package/lib/model/document-rank.js +1 -1
- package/lib/model/document-url.d.ts +63 -0
- package/lib/model/document-url.js +47 -0
- package/lib/model/document-url.js.map +1 -0
- package/lib/model/document.d.ts +1 -1
- package/lib/model/document.js +1 -1
- package/lib/model/embed-text-details.d.ts +17 -1
- package/lib/model/embed-text-details.js +10 -1
- package/lib/model/embed-text-details.js.map +1 -1
- package/lib/model/embed-text-result.d.ts +6 -2
- package/lib/model/embed-text-result.js +1 -1
- package/lib/model/embed-text-result.js.map +1 -1
- package/lib/model/function-call.d.ts +1 -1
- package/lib/model/function-call.js +1 -1
- package/lib/model/function-definition.d.ts +1 -1
- package/lib/model/function-definition.js +1 -1
- package/lib/model/function.d.ts +40 -0
- package/lib/model/function.js +36 -0
- package/lib/model/function.js.map +1 -0
- package/lib/model/generate-text-details.d.ts +1 -1
- package/lib/model/generate-text-details.js +1 -1
- package/lib/model/generate-text-result.d.ts +1 -1
- package/lib/model/generate-text-result.js +1 -1
- package/lib/model/generated-text.d.ts +1 -1
- package/lib/model/generated-text.js +1 -1
- package/lib/model/generic-chat-request.d.ts +2 -1
- package/lib/model/generic-chat-request.js +2 -1
- package/lib/model/generic-chat-request.js.map +1 -1
- package/lib/model/generic-chat-response.d.ts +1 -1
- package/lib/model/generic-chat-response.js +1 -1
- package/lib/model/grounding-chunk.d.ts +1 -1
- package/lib/model/grounding-chunk.js +1 -1
- package/lib/model/grounding-metadata.d.ts +1 -1
- package/lib/model/grounding-metadata.js +1 -1
- package/lib/model/grounding-support-segment.d.ts +1 -1
- package/lib/model/grounding-support-segment.js +1 -1
- package/lib/model/grounding-support.d.ts +1 -1
- package/lib/model/grounding-support.js +1 -1
- package/lib/model/grounding-web-chunk.d.ts +1 -1
- package/lib/model/grounding-web-chunk.js +1 -1
- package/lib/model/guardrail-configs.d.ts +1 -1
- package/lib/model/guardrail-configs.js +1 -1
- package/lib/model/guardrails-input.d.ts +1 -1
- package/lib/model/guardrails-input.js +1 -1
- package/lib/model/guardrails-results.d.ts +1 -1
- package/lib/model/guardrails-results.js +1 -1
- package/lib/model/guardrails-text-input.d.ts +1 -1
- package/lib/model/guardrails-text-input.js +1 -1
- package/lib/model/image-content.d.ts +1 -1
- package/lib/model/image-content.js +1 -1
- package/lib/model/image-url.d.ts +1 -1
- package/lib/model/image-url.js +1 -1
- package/lib/model/index.d.ts +51 -1
- package/lib/model/index.js +54 -4
- package/lib/model/index.js.map +1 -1
- package/lib/model/json-object-response-format.d.ts +1 -1
- package/lib/model/json-object-response-format.js +1 -1
- package/lib/model/json-schema-response-format.d.ts +1 -1
- package/lib/model/json-schema-response-format.js +1 -1
- package/lib/model/llama-llm-inference-request.d.ts +1 -1
- package/lib/model/llama-llm-inference-request.js +1 -1
- package/lib/model/llama-llm-inference-response.d.ts +1 -1
- package/lib/model/llama-llm-inference-response.js +1 -1
- package/lib/model/llm-inference-request.d.ts +1 -1
- package/lib/model/llm-inference-request.js +1 -1
- package/lib/model/llm-inference-response.d.ts +1 -1
- package/lib/model/llm-inference-response.js +1 -1
- package/lib/model/log-probability.d.ts +40 -0
- package/lib/model/log-probability.js +36 -0
- package/lib/model/log-probability.js.map +1 -0
- package/lib/model/logprobs.d.ts +1 -1
- package/lib/model/logprobs.js +1 -1
- package/lib/model/message.d.ts +1 -1
- package/lib/model/message.js +1 -1
- package/lib/model/on-demand-serving-mode.d.ts +1 -1
- package/lib/model/on-demand-serving-mode.js +1 -1
- package/lib/model/personally-identifiable-information-configuration.d.ts +1 -1
- package/lib/model/personally-identifiable-information-configuration.js +1 -1
- package/lib/model/personally-identifiable-information-result.d.ts +1 -1
- package/lib/model/personally-identifiable-information-result.js +1 -1
- package/lib/model/prediction.d.ts +1 -1
- package/lib/model/prediction.js +1 -1
- package/lib/model/prompt-injection-configuration.d.ts +1 -1
- package/lib/model/prompt-injection-configuration.js +1 -1
- package/lib/model/prompt-injection-protection-result.d.ts +1 -1
- package/lib/model/prompt-injection-protection-result.js +1 -1
- package/lib/model/prompt-tokens-details.d.ts +1 -1
- package/lib/model/prompt-tokens-details.js +1 -1
- package/lib/model/rerank-text-details.d.ts +5 -1
- package/lib/model/rerank-text-details.js +1 -1
- package/lib/model/rerank-text-details.js.map +1 -1
- package/lib/model/rerank-text-result.d.ts +1 -1
- package/lib/model/rerank-text-result.js +1 -1
- package/lib/model/response-format.d.ts +1 -1
- package/lib/model/response-format.js +1 -1
- package/lib/model/response-json-schema.d.ts +1 -1
- package/lib/model/response-json-schema.js +1 -1
- package/lib/model/search-entry-point.d.ts +1 -1
- package/lib/model/search-entry-point.js +1 -1
- package/lib/model/search-query.d.ts +1 -1
- package/lib/model/search-query.js +1 -1
- package/lib/model/serving-mode.d.ts +1 -1
- package/lib/model/serving-mode.js +1 -1
- package/lib/model/static-content.d.ts +1 -1
- package/lib/model/static-content.js +1 -1
- package/lib/model/stream-options.d.ts +1 -1
- package/lib/model/stream-options.js +1 -1
- package/lib/model/summarize-text-details.d.ts +1 -1
- package/lib/model/summarize-text-details.js +1 -1
- package/lib/model/summarize-text-result.d.ts +1 -1
- package/lib/model/summarize-text-result.js +1 -1
- package/lib/model/system-message.d.ts +1 -1
- package/lib/model/system-message.js +1 -1
- package/lib/model/text-content.d.ts +1 -1
- package/lib/model/text-content.js +1 -1
- package/lib/model/text-response-format.d.ts +1 -1
- package/lib/model/text-response-format.js +1 -1
- package/lib/model/token-likelihood.d.ts +1 -1
- package/lib/model/token-likelihood.js +1 -1
- package/lib/model/tool-call.d.ts +1 -1
- package/lib/model/tool-call.js +1 -1
- package/lib/model/tool-choice-auto.d.ts +1 -1
- package/lib/model/tool-choice-auto.js +1 -1
- package/lib/model/tool-choice-function.d.ts +1 -1
- package/lib/model/tool-choice-function.js +1 -1
- package/lib/model/tool-choice-none.d.ts +1 -1
- package/lib/model/tool-choice-none.js +1 -1
- package/lib/model/tool-choice-required.d.ts +1 -1
- package/lib/model/tool-choice-required.js +1 -1
- package/lib/model/tool-choice.d.ts +1 -1
- package/lib/model/tool-choice.js +1 -1
- package/lib/model/tool-definition.d.ts +1 -1
- package/lib/model/tool-definition.js +1 -1
- package/lib/model/tool-message.d.ts +1 -1
- package/lib/model/tool-message.js +1 -1
- package/lib/model/url-citation.d.ts +1 -1
- package/lib/model/url-citation.js +1 -1
- package/lib/model/usage.d.ts +1 -1
- package/lib/model/usage.js +1 -1
- package/lib/model/user-message.d.ts +1 -1
- package/lib/model/user-message.js +1 -1
- package/lib/model/video-content.d.ts +1 -1
- package/lib/model/video-content.js +1 -1
- package/lib/model/video-url.d.ts +1 -1
- package/lib/model/video-url.js +1 -1
- package/lib/model/web-search-options.d.ts +1 -1
- package/lib/model/web-search-options.js +1 -1
- package/lib/request/apply-guardrails-request.d.ts +1 -1
- package/lib/request/apply-guardrails-request.js +1 -1
- package/lib/request/chat-request.d.ts +1 -1
- package/lib/request/chat-request.js +1 -1
- package/lib/request/embed-text-request.d.ts +1 -1
- package/lib/request/embed-text-request.js +1 -1
- package/lib/request/generate-text-request.d.ts +1 -1
- package/lib/request/generate-text-request.js +1 -1
- package/lib/request/index.d.ts +1 -1
- package/lib/request/index.js +1 -1
- package/lib/request/rerank-text-request.d.ts +1 -1
- package/lib/request/rerank-text-request.js +1 -1
- package/lib/request/summarize-text-request.d.ts +1 -1
- package/lib/request/summarize-text-request.js +1 -1
- package/lib/response/apply-guardrails-response.d.ts +1 -1
- package/lib/response/apply-guardrails-response.js +1 -1
- package/lib/response/chat-response.d.ts +1 -1
- package/lib/response/chat-response.js +1 -1
- package/lib/response/embed-text-response.d.ts +1 -1
- package/lib/response/embed-text-response.js +1 -1
- package/lib/response/generate-text-response.d.ts +1 -1
- package/lib/response/generate-text-response.js +1 -1
- package/lib/response/index.d.ts +1 -1
- package/lib/response/index.js +1 -1
- package/lib/response/rerank-text-response.d.ts +1 -1
- package/lib/response/rerank-text-response.js +1 -1
- package/lib/response/summarize-text-response.d.ts +1 -1
- package/lib/response/summarize-text-response.js +1 -1
- package/package.json +3 -3
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
/**
|
|
21
|
+
* A citation source referencing a document.
|
|
22
|
+
*/
|
|
23
|
+
export interface CohereCitationDocument {
|
|
24
|
+
/**
|
|
25
|
+
* Unique identifier for the document.
|
|
26
|
+
*/
|
|
27
|
+
"id"?: string;
|
|
28
|
+
/**
|
|
29
|
+
* The actual document content or reference.
|
|
30
|
+
*/
|
|
31
|
+
"document"?: any;
|
|
32
|
+
}
|
|
33
|
+
export declare namespace CohereCitationDocument {
|
|
34
|
+
function getJsonObj(obj: CohereCitationDocument): object;
|
|
35
|
+
function getDeserializedJsonObj(obj: CohereCitationDocument): object;
|
|
36
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Generative AI Service Inference API
|
|
4
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
5
|
+
|
|
6
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
7
|
+
|
|
8
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
9
|
+
|
|
10
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
11
|
+
|
|
12
|
+
* OpenAPI spec version: 20231130
|
|
13
|
+
*
|
|
14
|
+
*
|
|
15
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
16
|
+
* Do not edit the class manually.
|
|
17
|
+
*
|
|
18
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
19
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
20
|
+
*/
|
|
21
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
22
|
+
exports.CohereCitationDocument = void 0;
|
|
23
|
+
var CohereCitationDocument;
|
|
24
|
+
(function (CohereCitationDocument) {
|
|
25
|
+
function getJsonObj(obj) {
|
|
26
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
27
|
+
return jsonObj;
|
|
28
|
+
}
|
|
29
|
+
CohereCitationDocument.getJsonObj = getJsonObj;
|
|
30
|
+
function getDeserializedJsonObj(obj) {
|
|
31
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
32
|
+
return jsonObj;
|
|
33
|
+
}
|
|
34
|
+
CohereCitationDocument.getDeserializedJsonObj = getDeserializedJsonObj;
|
|
35
|
+
})(CohereCitationDocument = exports.CohereCitationDocument || (exports.CohereCitationDocument = {}));
|
|
36
|
+
//# sourceMappingURL=cohere-citation-document.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere-citation-document.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-citation-document.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;AAmBH,IAAiB,sBAAsB,CAWtC;AAXD,WAAiB,sBAAsB;IACrC,SAAgB,UAAU,CAAC,GAA2B;QACpD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,iCAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAA2B;QAChE,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,6CAAsB,yBAIrC,CAAA;AACH,CAAC,EAXgB,sBAAsB,GAAtB,8BAAsB,KAAtB,8BAAsB,QAWtC"}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
import * as model from "../model";
|
|
21
|
+
/**
|
|
22
|
+
* A citation source object.
|
|
23
|
+
*/
|
|
24
|
+
export interface CohereCitationSourceV2 {
|
|
25
|
+
/**
|
|
26
|
+
* The source type.
|
|
27
|
+
*/
|
|
28
|
+
"type": CohereCitationSourceV2.Type;
|
|
29
|
+
"tool"?: model.CohereCitationToolV2;
|
|
30
|
+
"document"?: model.CohereCitationDocument;
|
|
31
|
+
}
|
|
32
|
+
export declare namespace CohereCitationSourceV2 {
|
|
33
|
+
enum Type {
|
|
34
|
+
Tool = "TOOL",
|
|
35
|
+
Document = "DOCUMENT",
|
|
36
|
+
/**
|
|
37
|
+
* This value is used if a service returns a value for this enum that is not recognized by this
|
|
38
|
+
* version of the SDK.
|
|
39
|
+
*/
|
|
40
|
+
UnknownValue = "UNKNOWN_VALUE"
|
|
41
|
+
}
|
|
42
|
+
function getJsonObj(obj: CohereCitationSourceV2): object;
|
|
43
|
+
function getDeserializedJsonObj(obj: CohereCitationSourceV2): object;
|
|
44
|
+
}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Generative AI Service Inference API
|
|
4
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
5
|
+
|
|
6
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
7
|
+
|
|
8
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
9
|
+
|
|
10
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
11
|
+
|
|
12
|
+
* OpenAPI spec version: 20231130
|
|
13
|
+
*
|
|
14
|
+
*
|
|
15
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
16
|
+
* Do not edit the class manually.
|
|
17
|
+
*
|
|
18
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
19
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
20
|
+
*/
|
|
21
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
22
|
+
if (k2 === undefined) k2 = k;
|
|
23
|
+
Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
|
|
24
|
+
}) : (function(o, m, k, k2) {
|
|
25
|
+
if (k2 === undefined) k2 = k;
|
|
26
|
+
o[k2] = m[k];
|
|
27
|
+
}));
|
|
28
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
29
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
30
|
+
}) : function(o, v) {
|
|
31
|
+
o["default"] = v;
|
|
32
|
+
});
|
|
33
|
+
var __importStar = (this && this.__importStar) || function (mod) {
|
|
34
|
+
if (mod && mod.__esModule) return mod;
|
|
35
|
+
var result = {};
|
|
36
|
+
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
|
|
37
|
+
__setModuleDefault(result, mod);
|
|
38
|
+
return result;
|
|
39
|
+
};
|
|
40
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
41
|
+
exports.CohereCitationSourceV2 = void 0;
|
|
42
|
+
const model = __importStar(require("../model"));
|
|
43
|
+
var CohereCitationSourceV2;
|
|
44
|
+
(function (CohereCitationSourceV2) {
|
|
45
|
+
let Type;
|
|
46
|
+
(function (Type) {
|
|
47
|
+
Type["Tool"] = "TOOL";
|
|
48
|
+
Type["Document"] = "DOCUMENT";
|
|
49
|
+
/**
|
|
50
|
+
* This value is used if a service returns a value for this enum that is not recognized by this
|
|
51
|
+
* version of the SDK.
|
|
52
|
+
*/
|
|
53
|
+
Type["UnknownValue"] = "UNKNOWN_VALUE";
|
|
54
|
+
})(Type = CohereCitationSourceV2.Type || (CohereCitationSourceV2.Type = {}));
|
|
55
|
+
function getJsonObj(obj) {
|
|
56
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {
|
|
57
|
+
"tool": obj.tool ? model.CohereCitationToolV2.getJsonObj(obj.tool) : undefined,
|
|
58
|
+
"document": obj.document ? model.CohereCitationDocument.getJsonObj(obj.document) : undefined
|
|
59
|
+
});
|
|
60
|
+
return jsonObj;
|
|
61
|
+
}
|
|
62
|
+
CohereCitationSourceV2.getJsonObj = getJsonObj;
|
|
63
|
+
function getDeserializedJsonObj(obj) {
|
|
64
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {
|
|
65
|
+
"tool": obj.tool ? model.CohereCitationToolV2.getDeserializedJsonObj(obj.tool) : undefined,
|
|
66
|
+
"document": obj.document
|
|
67
|
+
? model.CohereCitationDocument.getDeserializedJsonObj(obj.document)
|
|
68
|
+
: undefined
|
|
69
|
+
});
|
|
70
|
+
return jsonObj;
|
|
71
|
+
}
|
|
72
|
+
CohereCitationSourceV2.getDeserializedJsonObj = getDeserializedJsonObj;
|
|
73
|
+
})(CohereCitationSourceV2 = exports.CohereCitationSourceV2 || (exports.CohereCitationSourceV2 = {}));
|
|
74
|
+
//# sourceMappingURL=cohere-citation-source-v2.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere-citation-source-v2.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-citation-source-v2.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAelC,IAAiB,sBAAsB,CAmCtC;AAnCD,WAAiB,sBAAsB;IACrC,IAAY,IAQX;IARD,WAAY,IAAI;QACd,qBAAa,CAAA;QACb,6BAAqB,CAAA;QACrB;;;WAGG;QACH,sCAA8B,CAAA;IAChC,CAAC,EARW,IAAI,GAAJ,2BAAI,KAAJ,2BAAI,QAQf;IAED,SAAgB,UAAU,CAAC,GAA2B;QACpD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,MAAM,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,oBAAoB,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,SAAS;YAC9E,UAAU,EAAE,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,sBAAsB,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,SAAS;SAC7F,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAVe,iCAAU,aAUzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAA2B;QAChE,MAAM,OAAO,mCACR,GAAG,GACH;YACD,MAAM,EAAE,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,oBAAoB,CAAC,sBAAsB,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,SAAS;YAC1F,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,KAAK,CAAC,sBAAsB,CAAC,sBAAsB,CAAC,GAAG,CAAC,QAAQ,CAAC;gBACnE,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,6CAAsB,yBAYrC,CAAA;AACH,CAAC,EAnCgB,sBAAsB,GAAtB,8BAAsB,KAAtB,8BAAsB,QAmCtC"}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
/**
|
|
21
|
+
* A citation source referencing a tool result.
|
|
22
|
+
*/
|
|
23
|
+
export interface CohereCitationToolV2 {
|
|
24
|
+
/**
|
|
25
|
+
* Unique identifier of the tool call.
|
|
26
|
+
*/
|
|
27
|
+
"id"?: string;
|
|
28
|
+
/**
|
|
29
|
+
* Output from the tool.
|
|
30
|
+
*/
|
|
31
|
+
"toolOutput"?: any;
|
|
32
|
+
}
|
|
33
|
+
export declare namespace CohereCitationToolV2 {
|
|
34
|
+
function getJsonObj(obj: CohereCitationToolV2): object;
|
|
35
|
+
function getDeserializedJsonObj(obj: CohereCitationToolV2): object;
|
|
36
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Generative AI Service Inference API
|
|
4
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
5
|
+
|
|
6
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
7
|
+
|
|
8
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
9
|
+
|
|
10
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
11
|
+
|
|
12
|
+
* OpenAPI spec version: 20231130
|
|
13
|
+
*
|
|
14
|
+
*
|
|
15
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
16
|
+
* Do not edit the class manually.
|
|
17
|
+
*
|
|
18
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
19
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
20
|
+
*/
|
|
21
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
22
|
+
exports.CohereCitationToolV2 = void 0;
|
|
23
|
+
var CohereCitationToolV2;
|
|
24
|
+
(function (CohereCitationToolV2) {
|
|
25
|
+
function getJsonObj(obj) {
|
|
26
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
27
|
+
return jsonObj;
|
|
28
|
+
}
|
|
29
|
+
CohereCitationToolV2.getJsonObj = getJsonObj;
|
|
30
|
+
function getDeserializedJsonObj(obj) {
|
|
31
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
32
|
+
return jsonObj;
|
|
33
|
+
}
|
|
34
|
+
CohereCitationToolV2.getDeserializedJsonObj = getDeserializedJsonObj;
|
|
35
|
+
})(CohereCitationToolV2 = exports.CohereCitationToolV2 || (exports.CohereCitationToolV2 = {}));
|
|
36
|
+
//# sourceMappingURL=cohere-citation-tool-v2.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere-citation-tool-v2.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-citation-tool-v2.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;AAmBH,IAAiB,oBAAoB,CAWpC;AAXD,WAAiB,oBAAoB;IACnC,SAAgB,UAAU,CAAC,GAAyB;QAClD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,+BAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAyB;QAC9D,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,2CAAsB,yBAIrC,CAAA;AACH,CAAC,EAXgB,oBAAoB,GAApB,4BAAoB,KAApB,4BAAoB,QAWpC"}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
import * as model from "../model";
|
|
21
|
+
/**
|
|
22
|
+
* A source reference or citation for a piece of content.
|
|
23
|
+
*/
|
|
24
|
+
export interface CohereCitationV2 {
|
|
25
|
+
/**
|
|
26
|
+
* Start index of the cited snippet in the original source text. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
|
|
27
|
+
*/
|
|
28
|
+
"start"?: number;
|
|
29
|
+
/**
|
|
30
|
+
* End index of the cited snippet in the original source text. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
|
|
31
|
+
*/
|
|
32
|
+
"end"?: number;
|
|
33
|
+
/**
|
|
34
|
+
* Text snippet that is being cited.
|
|
35
|
+
*/
|
|
36
|
+
"text"?: string;
|
|
37
|
+
/**
|
|
38
|
+
* Index of the content block in which this citation appears. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
|
|
39
|
+
*/
|
|
40
|
+
"contentIndex"?: number;
|
|
41
|
+
/**
|
|
42
|
+
* The type of citation indicating what part of the response it is for.
|
|
43
|
+
*/
|
|
44
|
+
"type"?: CohereCitationV2.Type;
|
|
45
|
+
/**
|
|
46
|
+
* List of source objects for this citation.
|
|
47
|
+
*/
|
|
48
|
+
"sources"?: Array<model.CohereCitationSourceV2>;
|
|
49
|
+
}
|
|
50
|
+
export declare namespace CohereCitationV2 {
|
|
51
|
+
enum Type {
|
|
52
|
+
TextContent = "TEXT_CONTENT",
|
|
53
|
+
ThinkingContent = "THINKING_CONTENT",
|
|
54
|
+
Plan = "PLAN",
|
|
55
|
+
/**
|
|
56
|
+
* This value is used if a service returns a value for this enum that is not recognized by this
|
|
57
|
+
* version of the SDK.
|
|
58
|
+
*/
|
|
59
|
+
UnknownValue = "UNKNOWN_VALUE"
|
|
60
|
+
}
|
|
61
|
+
function getJsonObj(obj: CohereCitationV2): object;
|
|
62
|
+
function getDeserializedJsonObj(obj: CohereCitationV2): object;
|
|
63
|
+
}
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Generative AI Service Inference API
|
|
4
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
5
|
+
|
|
6
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
7
|
+
|
|
8
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
9
|
+
|
|
10
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
11
|
+
|
|
12
|
+
* OpenAPI spec version: 20231130
|
|
13
|
+
*
|
|
14
|
+
*
|
|
15
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
16
|
+
* Do not edit the class manually.
|
|
17
|
+
*
|
|
18
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
19
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
20
|
+
*/
|
|
21
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
22
|
+
if (k2 === undefined) k2 = k;
|
|
23
|
+
Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
|
|
24
|
+
}) : (function(o, m, k, k2) {
|
|
25
|
+
if (k2 === undefined) k2 = k;
|
|
26
|
+
o[k2] = m[k];
|
|
27
|
+
}));
|
|
28
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
29
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
30
|
+
}) : function(o, v) {
|
|
31
|
+
o["default"] = v;
|
|
32
|
+
});
|
|
33
|
+
var __importStar = (this && this.__importStar) || function (mod) {
|
|
34
|
+
if (mod && mod.__esModule) return mod;
|
|
35
|
+
var result = {};
|
|
36
|
+
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
|
|
37
|
+
__setModuleDefault(result, mod);
|
|
38
|
+
return result;
|
|
39
|
+
};
|
|
40
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
41
|
+
exports.CohereCitationV2 = void 0;
|
|
42
|
+
const model = __importStar(require("../model"));
|
|
43
|
+
var CohereCitationV2;
|
|
44
|
+
(function (CohereCitationV2) {
|
|
45
|
+
let Type;
|
|
46
|
+
(function (Type) {
|
|
47
|
+
Type["TextContent"] = "TEXT_CONTENT";
|
|
48
|
+
Type["ThinkingContent"] = "THINKING_CONTENT";
|
|
49
|
+
Type["Plan"] = "PLAN";
|
|
50
|
+
/**
|
|
51
|
+
* This value is used if a service returns a value for this enum that is not recognized by this
|
|
52
|
+
* version of the SDK.
|
|
53
|
+
*/
|
|
54
|
+
Type["UnknownValue"] = "UNKNOWN_VALUE";
|
|
55
|
+
})(Type = CohereCitationV2.Type || (CohereCitationV2.Type = {}));
|
|
56
|
+
function getJsonObj(obj) {
|
|
57
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {
|
|
58
|
+
"sources": obj.sources
|
|
59
|
+
? obj.sources.map(item => {
|
|
60
|
+
return model.CohereCitationSourceV2.getJsonObj(item);
|
|
61
|
+
})
|
|
62
|
+
: undefined
|
|
63
|
+
});
|
|
64
|
+
return jsonObj;
|
|
65
|
+
}
|
|
66
|
+
CohereCitationV2.getJsonObj = getJsonObj;
|
|
67
|
+
function getDeserializedJsonObj(obj) {
|
|
68
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {
|
|
69
|
+
"sources": obj.sources
|
|
70
|
+
? obj.sources.map(item => {
|
|
71
|
+
return model.CohereCitationSourceV2.getDeserializedJsonObj(item);
|
|
72
|
+
})
|
|
73
|
+
: undefined
|
|
74
|
+
});
|
|
75
|
+
return jsonObj;
|
|
76
|
+
}
|
|
77
|
+
CohereCitationV2.getDeserializedJsonObj = getDeserializedJsonObj;
|
|
78
|
+
})(CohereCitationV2 = exports.CohereCitationV2 || (exports.CohereCitationV2 = {}));
|
|
79
|
+
//# sourceMappingURL=cohere-citation-v2.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere-citation-v2.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-citation-v2.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAiClC,IAAiB,gBAAgB,CAwChC;AAxCD,WAAiB,gBAAgB;IAC/B,IAAY,IASX;IATD,WAAY,IAAI;QACd,oCAA4B,CAAA;QAC5B,4CAAoC,CAAA;QACpC,qBAAa,CAAA;QACb;;;WAGG;QACH,sCAA8B,CAAA;IAChC,CAAC,EATW,IAAI,GAAJ,qBAAI,KAAJ,qBAAI,QASf;IAED,SAAgB,UAAU,CAAC,GAAqB;QAC9C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,SAAS,EAAE,GAAG,CAAC,OAAO;gBACpB,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,KAAK,CAAC,sBAAsB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACvD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,2BAAU,aAazB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAqB;QAC1D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,SAAS,EAAE,GAAG,CAAC,OAAO;gBACpB,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,KAAK,CAAC,sBAAsB,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBACnE,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,uCAAsB,yBAarC,CAAA;AACH,CAAC,EAxCgB,gBAAgB,GAAhB,wBAAgB,KAAhB,wBAAgB,QAwChC"}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
/**
|
|
21
|
+
* The base class for the chat content.
|
|
22
|
+
*/
|
|
23
|
+
export interface CohereContentV2 {
|
|
24
|
+
"type": string;
|
|
25
|
+
}
|
|
26
|
+
export declare namespace CohereContentV2 {
|
|
27
|
+
function getJsonObj(obj: CohereContentV2): object;
|
|
28
|
+
function getDeserializedJsonObj(obj: CohereContentV2): object;
|
|
29
|
+
}
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Generative AI Service Inference API
|
|
4
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
5
|
+
|
|
6
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
7
|
+
|
|
8
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
9
|
+
|
|
10
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
11
|
+
|
|
12
|
+
* OpenAPI spec version: 20231130
|
|
13
|
+
*
|
|
14
|
+
*
|
|
15
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
16
|
+
* Do not edit the class manually.
|
|
17
|
+
*
|
|
18
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
19
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
20
|
+
*/
|
|
21
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
22
|
+
if (k2 === undefined) k2 = k;
|
|
23
|
+
Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
|
|
24
|
+
}) : (function(o, m, k, k2) {
|
|
25
|
+
if (k2 === undefined) k2 = k;
|
|
26
|
+
o[k2] = m[k];
|
|
27
|
+
}));
|
|
28
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
29
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
30
|
+
}) : function(o, v) {
|
|
31
|
+
o["default"] = v;
|
|
32
|
+
});
|
|
33
|
+
var __importStar = (this && this.__importStar) || function (mod) {
|
|
34
|
+
if (mod && mod.__esModule) return mod;
|
|
35
|
+
var result = {};
|
|
36
|
+
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
|
|
37
|
+
__setModuleDefault(result, mod);
|
|
38
|
+
return result;
|
|
39
|
+
};
|
|
40
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
41
|
+
exports.CohereContentV2 = void 0;
|
|
42
|
+
const model = __importStar(require("../model"));
|
|
43
|
+
const common = require("oci-common");
|
|
44
|
+
var CohereContentV2;
|
|
45
|
+
(function (CohereContentV2) {
|
|
46
|
+
function getJsonObj(obj) {
|
|
47
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
48
|
+
if (obj && "type" in obj && obj.type) {
|
|
49
|
+
switch (obj.type) {
|
|
50
|
+
case "THINKING":
|
|
51
|
+
return model.CohereThinkingContentV2.getJsonObj(jsonObj, true);
|
|
52
|
+
case "TEXT":
|
|
53
|
+
return model.CohereTextContentV2.getJsonObj(jsonObj, true);
|
|
54
|
+
case "IMAGE_URL":
|
|
55
|
+
return model.CohereImageContentV2.getJsonObj(jsonObj, true);
|
|
56
|
+
case "DOCUMENT":
|
|
57
|
+
return model.CohereDocumentContentV2.getJsonObj(jsonObj, true);
|
|
58
|
+
default:
|
|
59
|
+
if (common.LOG.logger)
|
|
60
|
+
common.LOG.logger.info(`Unknown value for: ${obj.type}`);
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
return jsonObj;
|
|
64
|
+
}
|
|
65
|
+
CohereContentV2.getJsonObj = getJsonObj;
|
|
66
|
+
function getDeserializedJsonObj(obj) {
|
|
67
|
+
const jsonObj = Object.assign(Object.assign({}, obj), {});
|
|
68
|
+
if (obj && "type" in obj && obj.type) {
|
|
69
|
+
switch (obj.type) {
|
|
70
|
+
case "THINKING":
|
|
71
|
+
return model.CohereThinkingContentV2.getDeserializedJsonObj(jsonObj, true);
|
|
72
|
+
case "TEXT":
|
|
73
|
+
return model.CohereTextContentV2.getDeserializedJsonObj(jsonObj, true);
|
|
74
|
+
case "IMAGE_URL":
|
|
75
|
+
return model.CohereImageContentV2.getDeserializedJsonObj(jsonObj, true);
|
|
76
|
+
case "DOCUMENT":
|
|
77
|
+
return model.CohereDocumentContentV2.getDeserializedJsonObj(jsonObj, true);
|
|
78
|
+
default:
|
|
79
|
+
if (common.LOG.logger)
|
|
80
|
+
common.LOG.logger.info(`Unknown value for: ${obj.type}`);
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
return jsonObj;
|
|
84
|
+
}
|
|
85
|
+
CohereContentV2.getDeserializedJsonObj = getDeserializedJsonObj;
|
|
86
|
+
})(CohereContentV2 = exports.CohereContentV2 || (exports.CohereContentV2 = {}));
|
|
87
|
+
//# sourceMappingURL=cohere-content-v2.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere-content-v2.js","sourceRoot":"","sources":["../../../../../lib/generativeaiinference/lib/model/cohere-content-v2.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;GAkBG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAClC,qCAAsC;AAStC,IAAiB,eAAe,CA+D/B;AA/DD,WAAiB,eAAe;IAC9B,SAAgB,UAAU,CAAC,GAAoB;QAC7C,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,uBAAuB,CAAC,UAAU,CACL,OAAQ,EAChD,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,mBAAmB,CAAC,UAAU,CACL,OAAQ,EAC5C,IAAI,CACL,CAAC;gBACJ,KAAK,WAAW;oBACd,OAAO,KAAK,CAAC,oBAAoB,CAAC,UAAU,CACL,OAAQ,EAC7C,IAAI,CACL,CAAC;gBACJ,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,uBAAuB,CAAC,UAAU,CACL,OAAQ,EAChD,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IA9Be,0BAAU,aA8BzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAoB;QACzD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,IAAI,GAAG,IAAI,MAAM,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE;YACpC,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,uBAAuB,CAAC,sBAAsB,CACjB,OAAQ,EAChD,IAAI,CACL,CAAC;gBACJ,KAAK,MAAM;oBACT,OAAO,KAAK,CAAC,mBAAmB,CAAC,sBAAsB,CACjB,OAAQ,EAC5C,IAAI,CACL,CAAC;gBACJ,KAAK,WAAW;oBACd,OAAO,KAAK,CAAC,oBAAoB,CAAC,sBAAsB,CACjB,OAAQ,EAC7C,IAAI,CACL,CAAC;gBACJ,KAAK,UAAU;oBACb,OAAO,KAAK,CAAC,uBAAuB,CAAC,sBAAsB,CACjB,OAAQ,EAChD,IAAI,CACL,CAAC;gBACJ;oBACE,IAAI,MAAM,CAAC,GAAG,CAAC,MAAM;wBAAE,MAAM,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,sBAAsB,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;aACnF;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IA9Be,sCAAsB,yBA8BrC,CAAA;AACH,CAAC,EA/DgB,eAAe,GAAf,uBAAe,KAAf,uBAAe,QA+D/B"}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Generative AI Service Inference API
|
|
3
|
+
* OCI Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases for text generation, summarization, and text embeddings.
|
|
4
|
+
|
|
5
|
+
Use the Generative AI service inference API to access your custom model endpoints, or to try the out-of-the-box models to {@link #eNGenerative-ai-inferenceLatestChatResultChat(ENGenerative-ai-inferenceLatestChatResultChatRequest) eNGenerative-ai-inferenceLatestChatResultChat}, {@link #eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText(ENGenerative-ai-inferenceLatestGenerateTextResultGenerateTextRequest) eNGenerative-ai-inferenceLatestGenerateTextResultGenerateText}, {@link #eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText(ENGenerative-ai-inferenceLatestSummarizeTextResultSummarizeTextRequest) eNGenerative-ai-inferenceLatestSummarizeTextResultSummarizeText}, and {@link #eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText(ENGenerative-ai-inferenceLatestEmbedTextResultEmbedTextRequest) eNGenerative-ai-inferenceLatestEmbedTextResultEmbedText}.
|
|
6
|
+
|
|
7
|
+
To use a Generative AI custom model for inference, you must first create an endpoint for that model. Use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest} to {@link #eNGenerative-aiLatestModel(ENGenerative-aiLatestModelRequest) eNGenerative-aiLatestModel} by fine-tuning an out-of-the-box model, or a previous version of a custom model, using your own data. Fine-tune the custom model on a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster}. Then, create a {@link #eNGenerative-aiLatestDedicatedAiCluster(ENGenerative-aiLatestDedicatedAiClusterRequest) eNGenerative-aiLatestDedicatedAiCluster} with an {@link Endpoint} to host your custom model. For resource management in the Generative AI service, use the {@link #eNGenerative-aiLatest(ENGenerative-aiLatestRequest) eNGenerative-aiLatest}.
|
|
8
|
+
|
|
9
|
+
To learn more about the service, see the [Generative AI documentation](https://docs.oracle.com/iaas/Content/generative-ai/home.htm).
|
|
10
|
+
|
|
11
|
+
* OpenAPI spec version: 20231130
|
|
12
|
+
*
|
|
13
|
+
*
|
|
14
|
+
* NOTE: This class is auto generated by OracleSDKGenerator.
|
|
15
|
+
* Do not edit the class manually.
|
|
16
|
+
*
|
|
17
|
+
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
|
18
|
+
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
|
|
19
|
+
*/
|
|
20
|
+
import * as model from "../model";
|
|
21
|
+
/**
 * Represents a single instance of Chat Document content
 */
export interface CohereDocumentContentV2 extends model.CohereContentV2 {
  /**
   * Relevant information that could be used by the model to generate a more accurate reply. The content of each document is generally short (should be under 300 words). Metadata should be used to provide additional information, both the key name and the value will be passed to the model.
   */
  "document"?: any;
  /**
   * Discriminator of the CohereContentV2 union; fixed to "DOCUMENT" for this subtype (see the `type` constant in the namespace below).
   */
  "type": string;
}
export declare namespace CohereDocumentContentV2 {
  /**
   * Serializes a CohereDocumentContentV2 value to its JSON wire form.
   * NOTE(review): isParentJsonObj presumably tells the helper the parent union dispatcher already shallow-copied the object — confirm against the generated implementation.
   */
  function getJsonObj(obj: CohereDocumentContentV2, isParentJsonObj?: boolean): object;
  /** The fixed "type" discriminator value identifying this content subtype. */
  const type = "DOCUMENT";
  /**
   * Converts a JSON wire value back into the SDK's CohereDocumentContentV2 shape; mirrors getJsonObj.
   */
  function getDeserializedJsonObj(obj: CohereDocumentContentV2, isParentJsonObj?: boolean): object;
}