oci-cli 3.72.0__py3-none-any.whl → 3.72.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oci_cli/bin/iot.psm1 +2 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/action-param-values-summary/list-params-for-action-type.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-aws-encryption-key-details.txt +396 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-azure-encryption-key-details.txt +396 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-create-autonomous-container-database-details.txt +11 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-create-autonomous-container-database-from-backup-details.txt +11 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-external-hsm-encryption-details.txt +396 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create-autonomous-container-database-google-cloud-provider-encryption-key-details.txt +396 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database/create.txt +11 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-container-database.txt +9 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/change-disaster-recovery-configuration.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/configure-saas-admin-user.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-adb-cross-region-data-guard-details.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-autonomous-database-create-cross-region-disaster-recovery-details.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-autonomous-database-gcp-key-details.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-autonomous-database-undelete-autonomous-database-details.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-cross-tenancy-disaster-recovery-details.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-from-backup-id.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-from-backup-timestamp.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-from-clone.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create-refreshable-clone.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/create.txt +18 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/fail-over.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/import-transportable-tablespace.txt +142 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/list-clones.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/list.txt +2 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/manual-refresh.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/restart.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/restore.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/rotate-key.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/shrink.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/start.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/stop.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/switchover.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/update-autonomous-database-gcp-key-details.txt +9 -4
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database/update.txt +9 -4
- oci_cli/help_text_producer/data_files/text/cmdref/db/autonomous-database.txt +2 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/cloud-autonomous-vm-cluster/register-cloud-autonomous-vm-cluster-pkcs.txt +140 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/cloud-autonomous-vm-cluster/unregister-cloud-autonomous-vm-cluster-pkcs.txt +140 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/cloud-autonomous-vm-cluster.txt +4 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/database/run-data-patch.txt +165 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/database/update.txt +11 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/database.txt +2 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/db-connection-bundle/download.txt +114 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/db-connection-bundle/get.txt +92 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/db-connection-bundle/list.txt +157 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/db-connection-bundle.txt +18 -0
- oci_cli/help_text_producer/data_files/text/cmdref/db/exascale-db-storage-vault/create.txt +3 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/exascale-db-storage-vault/update.txt +3 -2
- oci_cli/help_text_producer/data_files/text/cmdref/db/maintenance-run/create.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/maintenance-run/list.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/maintenance-run/update.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/maintenance-run-history/list.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db/scheduled-action/create.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/db.txt +26 -0
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control/work-request/work-request-error/{list.txt → list-errors.txt} +5 -5
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control/work-request/work-request-error.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control/work-request/{work-request-log-entry/list-work-request-logs.txt → work-request-log/list-logs.txt} +5 -5
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control/work-request/{work-request-log-entry.txt → work-request-log.txt} +3 -3
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control/work-request.txt +3 -3
- oci_cli/help_text_producer/data_files/text/cmdref/delegate-access-control.txt +3 -3
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/change-compartment.txt +114 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/create.txt +181 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/delete.txt +138 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/get.txt +100 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/renew.txt +184 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/set-api-key-state.txt +181 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key/update.txt +183 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key-collection/list-api-keys.txt +150 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key-collection.txt +14 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/api-key.txt +30 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/dedicated-ai-cluster/create.txt +6 -5
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/endpoint/create.txt +22 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/endpoint/update.txt +22 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/generative-ai-private-endpoint/create.txt +5 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/generative-ai-private-endpoint/update.txt +5 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai/model-collection/list-models.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/chat-result/chat-cohere-chat-request-v2.txt +350 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/chat-result/chat-generic-chat-request.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/chat-result.txt +2 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/embed-text-result/embed-text.txt +16 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/rerank-text-result/rerank-text-dedicated-serving-mode.txt +4 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/rerank-text-result/rerank-text-on-demand-serving-mode.txt +4 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference/rerank-text-result/rerank-text.txt +4 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai-inference.txt +2 -0
- oci_cli/help_text_producer/data_files/text/cmdref/generative-ai.txt +20 -0
- oci_cli/help_text_producer/data_files/text/cmdref/iot/digital-twin-instance/get-content.txt +3 -2
- oci_cli/help_text_producer/data_files/text/cmdref/iot/domain-group/create.txt +10 -0
- oci_cli/help_text_producer/data_files/text/cmdref/iot/domain-group/list.txt +8 -0
- oci_cli/help_text_producer/data_files/text/cmdref/organizations/work-request-error/{list.txt → list-errors.txt} +5 -5
- oci_cli/help_text_producer/data_files/text/cmdref/organizations/work-request-error.txt +1 -1
- oci_cli/help_text_producer/data_files/text/cmdref/organizations/{work-request-log-entry → work-request-log}/list.txt +19 -1
- oci_cli/help_text_producer/data_files/text/cmdref/organizations/{work-request-log-entry.txt → work-request-log.txt} +2 -2
- oci_cli/help_text_producer/data_files/text/cmdref/organizations.txt +2 -2
- oci_cli/version.py +1 -1
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/METADATA +2 -2
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/RECORD +107 -84
- services/database/src/oci_cli_database/generated/database_cli.py +1446 -140
- services/generative_ai/src/oci_cli_generative_ai/generated/generativeai_cli.py +519 -12
- services/generative_ai_inference/src/oci_cli_generative_ai_inference/generated/generativeaiinference_cli.py +169 -7
- services/identity/src/oci_cli_identity/identity_cli_extended.py +2 -0
- services/iot/src/oci_cli_iot/generated/iot_cli.py +10 -3
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/LICENSE.txt +0 -0
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/THIRD_PARTY_LICENSES.txt +0 -0
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/WHEEL +0 -0
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/entry_points.txt +0 -0
- {oci_cli-3.72.0.dist-info → oci_cli-3.72.1.dist-info}/top_level.txt +0 -0
services/generative_ai_inference/src/oci_cli_generative_ai_inference/generated/generativeaiinference_cli.py

@@ -222,13 +222,139 @@ def chat_on_demand_serving_mode(ctx, from_json, compartment_id, chat_request, se
     cli_util.render_response(result, ctx)


+@chat_result_group.command(name=cli_util.override('generative_ai_inference.chat_cohere_chat_request_v2.command_name', 'chat-cohere-chat-request-v2'), help=u"""Creates a response for the given conversation. \n[Command Reference](chat)""")
+@cli_util.option('--compartment-id', required=True, help=u"""The OCID of compartment in which to call the Generative AI service to chat.""")
+@cli_util.option('--serving-mode', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-messages', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of chat messages in chronological order, representing a conversation between the user and the model.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-documents', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of relevant documents that the model can refer to for generating grounded responses to the user's requests. Some example keys that you can add to the dictionary are \"text\", \"author\", and \"date\". Keep the total word count of the strings in the dictionary to 300 words or less.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-citation-options', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-tools-choice', type=custom_types.CliCaseInsensitiveChoice(["REQUIRED", "NONE"]), help=u"""Used to control whether or not the model will be forced to use a tool when answering. When REQUIRED is specified, the model will be forced to use at least one of the user-defined tools, and the tools parameter must be passed in the request. When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response. If tool_choice isn\u2019t specified, then the model is free to choose whether to use the specified tools or not. Note:This parameter is only compatible with models Command-r7b and newer.""")
+@cli_util.option('--chat-request-tools', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of available tools (functions) that the model may suggest invoking before producing a text response.
+
+This option is a JSON list with items of type CohereToolV2. For documentation on CohereToolV2 please see our API reference: https://docs.cloud.oracle.com/api/#/en/generativeaiinference/20231130/datatypes/CohereToolV2.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-is-strict-tools-enabled', type=click.BOOL, help=u"""When set to true, tool calls in the Assistant message will be forced to follow the tool definition strictly. Note:The first few requests with a new set of tools will take longer to process.""")
+@cli_util.option('--chat-request-is-log-probs-enabled', type=click.BOOL, help=u"""The log probabilities of the generated tokens will be included in the response.""")
+@cli_util.option('--chat-request-thinking', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-response-format', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-is-search-queries-only', type=click.BOOL, help=u"""When set to true, the response contains only a list of generated search queries without the search results and the model will not respond to the user's message.""")
+@cli_util.option('--chat-request-stream-options', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-is-stream', type=click.BOOL, help=u"""Whether to stream the partial progress of the model's response. When set to true, as tokens become available, they are sent as data-only server-sent events.""")
+@cli_util.option('--chat-request-max-tokens', type=click.INT, help=u"""The maximum number of output tokens that the model will generate for the response. The token count of your prompt plus maxTokens must not exceed the model's context length. For on-demand inferencing, the response length is capped at 4,000 tokens for each run.""")
+@cli_util.option('--chat-request-temperature', help=u"""A number that sets the randomness of the generated output. A lower temperature means less random generations. Use lower numbers for tasks such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.""")
+@cli_util.option('--chat-request-top-k', type=click.INT, help=u"""A sampling method in which the model chooses the next token randomly from the top k most likely tokens. A higher value for k generates more random output, which makes the output text sound more natural. The default value for k is 0 which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
+
+If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but only the probabilities of the top 10 add up to the value of p, then only the top 10 tokens are chosen.""")
+@cli_util.option('--chat-request-top-p', help=u"""If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
+
+To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.""")
+@cli_util.option('--chat-request-frequency-penalty', help=u"""To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.""")
+@cli_util.option('--chat-request-presence-penalty', help=u"""To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
+
+Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.""")
+@cli_util.option('--chat-request-seed', type=click.INT, help=u"""If specified, the backend will make a best effort to sample tokens deterministically, so that repeated requests with the same seed and parameters yield the same result. However, determinism cannot be fully guaranteed.""")
+@cli_util.option('--chat-request-stop-sequences', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Stop the model generation when it reaches a stop sequence defined in this parameter.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
+@cli_util.option('--chat-request-priority', type=click.INT, help=u"""The priority of the request (lower means earlier handling; default 0 highest priority). Higher priority requests are handled first, and dropped last when the system is under load.""")
+@cli_util.option('--chat-request-is-raw-prompting', type=click.BOOL, help=u"""When enabled, the user\u2019s `message` will be sent to the model without any preprocessing.""")
+@cli_util.option('--chat-request-safety-mode', type=custom_types.CliCaseInsensitiveChoice(["CONTEXTUAL", "STRICT", "OFF"]), help=u"""Safety mode: Adds a safety instruction for the model to use when generating responses. Contextual: (Default) Puts fewer constraints on the output. It maintains core protections by aiming to reject harmful or illegal suggestions, but it allows profanity and some toxic content, sexually explicit and violent content, and content that contains medical, financial, or legal information. Contextual mode is suited for entertainment, creative, or academic use. Strict: Aims to avoid sensitive topics, such as violent or sexual acts and profanity. This mode aims to provide a safer experience by prohibiting responses or recommendations that it finds inappropriate. Strict mode is suited for corporate use, such as for corporate communications and customer service. Off: No safety mode is applied. Note: This parameter is only compatible with models cohere.command-r-08-2024, cohere.command-r-plus-08-2024 and Cohere models released after these models. See [release dates].""")
+@json_skeleton_utils.get_cli_json_input_option({'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-messages': {'module': 'generative_ai_inference', 'class': 'list[CohereMessageV2]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-citation-options': {'module': 'generative_ai_inference', 'class': 'CitationOptionsV2'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereToolV2]'}, 'chat-request-thinking': {'module': 'generative_ai_inference', 'class': 'CohereThinkingV2'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-stream-options': {'module': 'generative_ai_inference', 'class': 'StreamOptions'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
+@cli_util.help_option
+@click.pass_context
+@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-messages': {'module': 'generative_ai_inference', 'class': 'list[CohereMessageV2]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-citation-options': {'module': 'generative_ai_inference', 'class': 'CitationOptionsV2'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereToolV2]'}, 'chat-request-thinking': {'module': 'generative_ai_inference', 'class': 'CohereThinkingV2'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-stream-options': {'module': 'generative_ai_inference', 'class': 'StreamOptions'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'ChatResult'})
+@cli_util.wrap_exceptions
+def chat_cohere_chat_request_v2(ctx, from_json, compartment_id, serving_mode, chat_request_messages, chat_request_documents, chat_request_citation_options, chat_request_tools_choice, chat_request_tools, chat_request_is_strict_tools_enabled, chat_request_is_log_probs_enabled, chat_request_thinking, chat_request_response_format, chat_request_is_search_queries_only, chat_request_stream_options, chat_request_is_stream, chat_request_max_tokens, chat_request_temperature, chat_request_top_k, chat_request_top_p, chat_request_frequency_penalty, chat_request_presence_penalty, chat_request_seed, chat_request_stop_sequences, chat_request_priority, chat_request_is_raw_prompting, chat_request_safety_mode):
+
+    kwargs = {}
+    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
+
+    _details = {}
+    _details['chatRequest'] = {}
+    _details['compartmentId'] = compartment_id
+    _details['servingMode'] = cli_util.parse_json_parameter("serving_mode", serving_mode)
+    _details['chatRequest']['messages'] = cli_util.parse_json_parameter("chat_request_messages", chat_request_messages)
+
+    if chat_request_documents is not None:
+        _details['chatRequest']['documents'] = cli_util.parse_json_parameter("chat_request_documents", chat_request_documents)
+
+    if chat_request_citation_options is not None:
+        _details['chatRequest']['citationOptions'] = cli_util.parse_json_parameter("chat_request_citation_options", chat_request_citation_options)
+
+    if chat_request_tools_choice is not None:
+        _details['chatRequest']['toolsChoice'] = chat_request_tools_choice
+
+    if chat_request_tools is not None:
+        _details['chatRequest']['tools'] = cli_util.parse_json_parameter("chat_request_tools", chat_request_tools)
+
+    if chat_request_is_strict_tools_enabled is not None:
+        _details['chatRequest']['isStrictToolsEnabled'] = chat_request_is_strict_tools_enabled
+
+    if chat_request_is_log_probs_enabled is not None:
+        _details['chatRequest']['isLogProbsEnabled'] = chat_request_is_log_probs_enabled
+
+    if chat_request_thinking is not None:
+        _details['chatRequest']['thinking'] = cli_util.parse_json_parameter("chat_request_thinking", chat_request_thinking)
+
+    if chat_request_response_format is not None:
+        _details['chatRequest']['responseFormat'] = cli_util.parse_json_parameter("chat_request_response_format", chat_request_response_format)
+
+    if chat_request_is_search_queries_only is not None:
+        _details['chatRequest']['isSearchQueriesOnly'] = chat_request_is_search_queries_only
+
+    if chat_request_stream_options is not None:
+        _details['chatRequest']['streamOptions'] = cli_util.parse_json_parameter("chat_request_stream_options", chat_request_stream_options)
+
+    if chat_request_is_stream is not None:
+        _details['chatRequest']['isStream'] = chat_request_is_stream
+
+    if chat_request_max_tokens is not None:
+        _details['chatRequest']['maxTokens'] = chat_request_max_tokens
+
+    if chat_request_temperature is not None:
+        _details['chatRequest']['temperature'] = chat_request_temperature
+
+    if chat_request_top_k is not None:
+        _details['chatRequest']['topK'] = chat_request_top_k
+
+    if chat_request_top_p is not None:
+        _details['chatRequest']['topP'] = chat_request_top_p
+
+    if chat_request_frequency_penalty is not None:
+        _details['chatRequest']['frequencyPenalty'] = chat_request_frequency_penalty
+
+    if chat_request_presence_penalty is not None:
+        _details['chatRequest']['presencePenalty'] = chat_request_presence_penalty
+
+    if chat_request_seed is not None:
+        _details['chatRequest']['seed'] = chat_request_seed
+
+    if chat_request_stop_sequences is not None:
+        _details['chatRequest']['stopSequences'] = cli_util.parse_json_parameter("chat_request_stop_sequences", chat_request_stop_sequences)
+
+    if chat_request_priority is not None:
+        _details['chatRequest']['priority'] = chat_request_priority
+
+    if chat_request_is_raw_prompting is not None:
+        _details['chatRequest']['isRawPrompting'] = chat_request_is_raw_prompting
+
+    if chat_request_safety_mode is not None:
+        _details['chatRequest']['safetyMode'] = chat_request_safety_mode
+
+    _details['chatRequest']['apiFormat'] = 'COHEREV2'
+
+    client = cli_util.build_client('generative_ai_inference', 'generative_ai_inference', ctx)
+    result = client.chat(
+        chat_details=_details,
+        **kwargs
+    )
+    cli_util.render_response(result, ctx)
+
+
 @chat_result_group.command(name=cli_util.override('generative_ai_inference.chat_generic_chat_request.command_name', 'chat-generic-chat-request'), help=u"""Creates a response for the given conversation. \n[Command Reference](chat)""")
 @cli_util.option('--compartment-id', required=True, help=u"""The OCID of compartment in which to call the Generative AI service to chat.""")
 @cli_util.option('--serving-mode', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
 @cli_util.option('--chat-request-messages', type=custom_types.CLI_COMPLEX_TYPE, help=u"""The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role (`USER` or the `CHATBOT`) and content.

 This option is a JSON list with items of type Message. For documentation on Message please see our API reference: https://docs.cloud.oracle.com/api/#/en/generativeaiinference/20231130/datatypes/Message.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
-@cli_util.option('--chat-request-reasoning-effort', type=custom_types.CliCaseInsensitiveChoice(["MINIMAL", "LOW", "MEDIUM", "HIGH"]), help=u"""Constrains effort on reasoning for reasoning models. Currently supported values are minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.""")
+@cli_util.option('--chat-request-reasoning-effort', type=custom_types.CliCaseInsensitiveChoice(["NONE", "MINIMAL", "LOW", "MEDIUM", "HIGH"]), help=u"""Constrains effort on reasoning for reasoning models. Currently supported values are minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.""")
 @cli_util.option('--chat-request-verbosity', type=custom_types.CliCaseInsensitiveChoice(["LOW", "MEDIUM", "HIGH"]), help=u"""Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses.""")
 @cli_util.option('--chat-request-metadata', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

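For orientation, the sketch below shows the shape of the payload that `chat_cohere_chat_request_v2` assembles before calling `client.chat(chat_details=_details, **kwargs)`. The dictionary keys mirror the generated code above; the compartment OCID, the serving-mode shape, and the message structure are illustrative assumptions rather than values taken from this diff.

```python
# Illustrative sketch only: keys mirror the _details dict built above; values are placeholders.
chat_request = {
    'apiFormat': 'COHEREV2',      # set unconditionally by the new command
    'messages': [                 # --chat-request-messages (CohereMessageV2 list; shape assumed here)
        {'role': 'USER', 'content': [{'type': 'TEXT', 'text': 'Summarize our Q3 report.'}]},
    ],
    'maxTokens': 600,             # --chat-request-max-tokens
    'temperature': 0.3,           # --chat-request-temperature
    'safetyMode': 'CONTEXTUAL',   # --chat-request-safety-mode
}

chat_details = {
    'compartmentId': 'ocid1.compartment.oc1..exampleuniqueID',   # --compartment-id (placeholder OCID)
    # Assumed on-demand serving-mode shape; the model name is one mentioned in the safety-mode help text above.
    'servingMode': {'servingType': 'ON_DEMAND', 'modelId': 'cohere.command-r-plus-08-2024'},
    'chatRequest': chat_request,
}
```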
@@ -521,6 +647,8 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @cli_util.option('--serving-mode', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
 @cli_util.option('--compartment-id', required=True, help=u"""The OCID of compartment in which to call the Generative AI service to create text embeddings.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to include the original inputs in the response. Results are index-based.""")
+@cli_util.option('--embedding-types', type=custom_types.CliCaseInsensitiveChoice(["float", "int8", "uint8", "binary", "ubinary", "base64"]), help=u"""Specifies the types of embeddings you want to get back. Supports list of enums. Supported values :float, int8, uint8, binary, ubinary, base64. If nothing is passed default will be considered as float.""")
+@cli_util.option('--output-dimensions', type=click.INT, help=u"""The number of dimensions of the output embedding. This is only available for embed-v4 and newer models. Possible values are 256, 512, 1024, and 1536.""")
 @cli_util.option('--truncate', type=custom_types.CliCaseInsensitiveChoice(["NONE", "START", "END"]), help=u"""For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.""")
 @cli_util.option('--input-type', type=custom_types.CliCaseInsensitiveChoice(["SEARCH_DOCUMENT", "SEARCH_QUERY", "CLASSIFICATION", "CLUSTERING", "IMAGE"]), help=u"""Specifies the input type.""")
 @json_skeleton_utils.get_cli_json_input_option({'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}, 'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}})
@@ -528,7 +656,7 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}, 'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}}, output_type={'module': 'generative_ai_inference', 'class': 'EmbedTextResult'})
 @cli_util.wrap_exceptions
-def embed_text(ctx, from_json, inputs, serving_mode, compartment_id, is_echo, truncate, input_type):
+def embed_text(ctx, from_json, inputs, serving_mode, compartment_id, is_echo, embedding_types, output_dimensions, truncate, input_type):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -541,6 +669,12 @@ def embed_text(ctx, from_json, inputs, serving_mode, compartment_id, is_echo, tr
     if is_echo is not None:
         _details['isEcho'] = is_echo

+    if embedding_types is not None:
+        _details['embeddingTypes'] = cli_util.parse_json_parameter("embedding_types", embedding_types)
+
+    if output_dimensions is not None:
+        _details['outputDimensions'] = output_dimensions
+
     if truncate is not None:
         _details['truncate'] = truncate

@@ -562,6 +696,8 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @cli_util.option('--compartment-id', required=True, help=u"""The OCID of compartment in which to call the Generative AI service to create text embeddings.""")
 @cli_util.option('--serving-mode-endpoint-id', required=True, help=u"""The OCID of the endpoint to use.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to include the original inputs in the response. Results are index-based.""")
+@cli_util.option('--embedding-types', type=custom_types.CliCaseInsensitiveChoice(["float", "int8", "uint8", "binary", "ubinary", "base64"]), help=u"""Specifies the types of embeddings you want to get back. Supports list of enums. Supported values :float, int8, uint8, binary, ubinary, base64. If nothing is passed default will be considered as float.""")
+@cli_util.option('--output-dimensions', type=click.INT, help=u"""The number of dimensions of the output embedding. This is only available for embed-v4 and newer models. Possible values are 256, 512, 1024, and 1536.""")
 @cli_util.option('--truncate', type=custom_types.CliCaseInsensitiveChoice(["NONE", "START", "END"]), help=u"""For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.""")
 @cli_util.option('--input-type', type=custom_types.CliCaseInsensitiveChoice(["SEARCH_DOCUMENT", "SEARCH_QUERY", "CLASSIFICATION", "CLUSTERING", "IMAGE"]), help=u"""Specifies the input type.""")
 @json_skeleton_utils.get_cli_json_input_option({'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
@@ -569,7 +705,7 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'EmbedTextResult'})
 @cli_util.wrap_exceptions
-def embed_text_dedicated_serving_mode(ctx, from_json, inputs, compartment_id, serving_mode_endpoint_id, is_echo, truncate, input_type):
+def embed_text_dedicated_serving_mode(ctx, from_json, inputs, compartment_id, serving_mode_endpoint_id, is_echo, embedding_types, output_dimensions, truncate, input_type):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -583,6 +719,12 @@ def embed_text_dedicated_serving_mode(ctx, from_json, inputs, compartment_id, se
     if is_echo is not None:
         _details['isEcho'] = is_echo

+    if embedding_types is not None:
+        _details['embeddingTypes'] = cli_util.parse_json_parameter("embedding_types", embedding_types)
+
+    if output_dimensions is not None:
+        _details['outputDimensions'] = output_dimensions
+
     if truncate is not None:
         _details['truncate'] = truncate

@@ -606,6 +748,8 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @cli_util.option('--compartment-id', required=True, help=u"""The OCID of compartment in which to call the Generative AI service to create text embeddings.""")
 @cli_util.option('--serving-mode-model-id', required=True, help=u"""The unique ID of a model to use. You can use the [ListModels] API to list the available models.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to include the original inputs in the response. Results are index-based.""")
+@cli_util.option('--embedding-types', type=custom_types.CliCaseInsensitiveChoice(["float", "int8", "uint8", "binary", "ubinary", "base64"]), help=u"""Specifies the types of embeddings you want to get back. Supports list of enums. Supported values :float, int8, uint8, binary, ubinary, base64. If nothing is passed default will be considered as float.""")
+@cli_util.option('--output-dimensions', type=click.INT, help=u"""The number of dimensions of the output embedding. This is only available for embed-v4 and newer models. Possible values are 256, 512, 1024, and 1536.""")
 @cli_util.option('--truncate', type=custom_types.CliCaseInsensitiveChoice(["NONE", "START", "END"]), help=u"""For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.""")
 @cli_util.option('--input-type', type=custom_types.CliCaseInsensitiveChoice(["SEARCH_DOCUMENT", "SEARCH_QUERY", "CLASSIFICATION", "CLUSTERING", "IMAGE"]), help=u"""Specifies the input type.""")
 @json_skeleton_utils.get_cli_json_input_option({'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
@@ -613,7 +757,7 @@ An embedding is numeric representation of a piece of text. This text can be a ph
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'inputs': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'EmbedTextResult'})
 @cli_util.wrap_exceptions
-def embed_text_on_demand_serving_mode(ctx, from_json, inputs, compartment_id, serving_mode_model_id, is_echo, truncate, input_type):
+def embed_text_on_demand_serving_mode(ctx, from_json, inputs, compartment_id, serving_mode_model_id, is_echo, embedding_types, output_dimensions, truncate, input_type):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -627,6 +771,12 @@ def embed_text_on_demand_serving_mode(ctx, from_json, inputs, compartment_id, se
     if is_echo is not None:
         _details['isEcho'] = is_echo

+    if embedding_types is not None:
+        _details['embeddingTypes'] = cli_util.parse_json_parameter("embedding_types", embedding_types)
+
+    if output_dimensions is not None:
+        _details['outputDimensions'] = output_dimensions
+
     if truncate is not None:
         _details['truncate'] = truncate

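All three embed-text variants gain the same two optional fields, which are written into the request body only when the corresponding flags are passed. A minimal sketch of the resulting payload follows; keys not visible in these hunks (such as `inputs`) are inferred from the option names, and the model ID and OCID are placeholders.

```python
# Illustrative sketch only: keys mirror the _details dict built in the embed_text variants above.
embed_text_details = {
    'compartmentId': 'ocid1.compartment.oc1..exampleuniqueID',                       # placeholder OCID
    'servingMode': {'servingType': 'ON_DEMAND', 'modelId': 'example-embed-model'},   # assumed shape, placeholder model
    'inputs': ['first passage to embed', 'second passage to embed'],
    'embeddingTypes': ['float', 'int8'],   # --embedding-types (JSON list; float is the default when omitted)
    'outputDimensions': 1024,              # --output-dimensions (embed-v4 and newer models only)
    'truncate': 'END',                     # --truncate
}
```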
@@ -912,12 +1062,13 @@ Rerank assigns an index and a relevance score to each document, indicating which
 @cli_util.option('--top-n', type=click.INT, help=u"""The number of most relevant documents or indices to return. Defaults to the length of the documents.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to return the documents in the response.""")
 @cli_util.option('--max-chunks-per-document', type=click.INT, help=u"""The maximum number of chunks to produce internally from a document.""")
+@cli_util.option('--max-tokens-per-document', type=click.INT, help=u"""Used to truncate the long documents with the specified no of tokens.""")
 @json_skeleton_utils.get_cli_json_input_option({'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
 @cli_util.help_option
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'RerankTextResult'})
 @cli_util.wrap_exceptions
-def rerank_text(ctx, from_json, input, compartment_id, serving_mode, documents, top_n, is_echo, max_chunks_per_document):
+def rerank_text(ctx, from_json, input, compartment_id, serving_mode, documents, top_n, is_echo, max_chunks_per_document, max_tokens_per_document):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -937,6 +1088,9 @@ def rerank_text(ctx, from_json, input, compartment_id, serving_mode, documents,
     if max_chunks_per_document is not None:
         _details['maxChunksPerDocument'] = max_chunks_per_document

+    if max_tokens_per_document is not None:
+        _details['maxTokensPerDocument'] = max_tokens_per_document
+
     client = cli_util.build_client('generative_ai_inference', 'generative_ai_inference', ctx)
     result = client.rerank_text(
         rerank_text_details=_details,
@@ -955,12 +1109,13 @@ Rerank assigns an index and a relevance score to each document, indicating which
 @cli_util.option('--top-n', type=click.INT, help=u"""The number of most relevant documents or indices to return. Defaults to the length of the documents.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to return the documents in the response.""")
 @cli_util.option('--max-chunks-per-document', type=click.INT, help=u"""The maximum number of chunks to produce internally from a document.""")
+@cli_util.option('--max-tokens-per-document', type=click.INT, help=u"""Used to truncate the long documents with the specified no of tokens.""")
 @json_skeleton_utils.get_cli_json_input_option({'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
 @cli_util.help_option
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'RerankTextResult'})
 @cli_util.wrap_exceptions
-def rerank_text_dedicated_serving_mode(ctx, from_json, input, compartment_id, documents, serving_mode_endpoint_id, top_n, is_echo, max_chunks_per_document):
+def rerank_text_dedicated_serving_mode(ctx, from_json, input, compartment_id, documents, serving_mode_endpoint_id, top_n, is_echo, max_chunks_per_document, max_tokens_per_document):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -981,6 +1136,9 @@ def rerank_text_dedicated_serving_mode(ctx, from_json, input, compartment_id, do
     if max_chunks_per_document is not None:
         _details['maxChunksPerDocument'] = max_chunks_per_document

+    if max_tokens_per_document is not None:
+        _details['maxTokensPerDocument'] = max_tokens_per_document
+
     _details['servingMode']['servingType'] = 'DEDICATED'

     client = cli_util.build_client('generative_ai_inference', 'generative_ai_inference', ctx)
@@ -1001,12 +1159,13 @@ Rerank assigns an index and a relevance score to each document, indicating which
 @cli_util.option('--top-n', type=click.INT, help=u"""The number of most relevant documents or indices to return. Defaults to the length of the documents.""")
 @cli_util.option('--is-echo', type=click.BOOL, help=u"""Whether or not to return the documents in the response.""")
 @cli_util.option('--max-chunks-per-document', type=click.INT, help=u"""The maximum number of chunks to produce internally from a document.""")
+@cli_util.option('--max-tokens-per-document', type=click.INT, help=u"""Used to truncate the long documents with the specified no of tokens.""")
 @json_skeleton_utils.get_cli_json_input_option({'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
 @cli_util.help_option
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'documents': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'RerankTextResult'})
 @cli_util.wrap_exceptions
-def rerank_text_on_demand_serving_mode(ctx, from_json, input, compartment_id, documents, serving_mode_model_id, top_n, is_echo, max_chunks_per_document):
+def rerank_text_on_demand_serving_mode(ctx, from_json, input, compartment_id, documents, serving_mode_model_id, top_n, is_echo, max_chunks_per_document, max_tokens_per_document):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -1027,6 +1186,9 @@ def rerank_text_on_demand_serving_mode(ctx, from_json, input, compartment_id, do
     if max_chunks_per_document is not None:
         _details['maxChunksPerDocument'] = max_chunks_per_document

+    if max_tokens_per_document is not None:
+        _details['maxTokensPerDocument'] = max_tokens_per_document
+
     _details['servingMode']['servingType'] = 'ON_DEMAND'

     client = cli_util.build_client('generative_ai_inference', 'generative_ai_inference', ctx)
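The rerank variants follow the same pattern: `--max-tokens-per-document` is forwarded as `maxTokensPerDocument` next to the existing `maxChunksPerDocument` key. A small sketch with placeholder values; keys not shown in these hunks (`input`, `documents`, `topN`) are inferred from the option names.

```python
# Illustrative sketch only: keys mirror the _details dict built in the rerank_text variants above.
rerank_text_details = {
    'compartmentId': 'ocid1.compartment.oc1..exampleuniqueID',                        # placeholder OCID
    'servingMode': {'servingType': 'ON_DEMAND', 'modelId': 'example-rerank-model'},   # assumed shape, placeholder model
    'input': 'Which document mentions the renewal date?',
    'documents': ['The contract renews on March 1.', 'Invoices are due in 30 days.'],
    'topN': 1,                        # --top-n
    'maxChunksPerDocument': 4,        # --max-chunks-per-document
    'maxTokensPerDocument': 512,      # new --max-tokens-per-document: truncate long documents to this many tokens
}
```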
services/identity/src/oci_cli_identity/identity_cli_extended.py

@@ -561,6 +561,8 @@ def list_compartments(ctx, from_json, all_pages, page_size, compartment_id, page
     )
     if with_root:
         tenancy_id = get_tenancy_from_config(ctx)
+        if tenancy_id is None:
+            tenancy_id = compartment_id
         tenancy_result = client.get_compartment(
             compartment_id=tenancy_id,
         )
services/iot/src/oci_cli_iot/generated/iot_cli.py

@@ -985,6 +985,7 @@ def create_iot_domain(ctx, from_json, wait_for_state, max_wait_seconds, wait_int

 @iot_domain_group_group.command(name=cli_util.override('iot.create_iot_domain_group.command_name', 'create'), help=u"""Creates a new IoT domain group. \n[Command Reference](createIotDomainGroup)""")
 @cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment corresponding to the resource.""")
+@cli_util.option('--type', type=custom_types.CliCaseInsensitiveChoice(["STANDARD", "LIGHTWEIGHT"]), help=u"""Type of the domain group. LIGHTWEIGHT uses fewer resources and has a higher Recovery Time Objective (RTO), making it suitable for development and testing. STANDARD is recommended for production.""")
 @cli_util.option('--display-name', help=u"""A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.""")
 @cli_util.option('--description', help=u"""A short description of the resource.""")
 @cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags].
@@ -1001,7 +1002,7 @@ Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_comp
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'freeform-tags': {'module': 'iot', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'iot', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'iot', 'class': 'IotDomainGroup'})
 @cli_util.wrap_exceptions
-def create_iot_domain_group(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, display_name, description, freeform_tags, defined_tags):
+def create_iot_domain_group(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, type, display_name, description, freeform_tags, defined_tags):

     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -1009,6 +1010,9 @@ def create_iot_domain_group(ctx, from_json, wait_for_state, max_wait_seconds, wa
     _details = {}
     _details['compartmentId'] = compartment_id

+    if type is not None:
+        _details['type'] = type
+
     if display_name is not None:
         _details['displayName'] = display_name

@@ -1468,7 +1472,7 @@ def get_digital_twin_instance(ctx, from_json, digital_twin_instance_id):
     cli_util.render_response(result, ctx)


-@digital_twin_instance_group.command(name=cli_util.override('iot.get_digital_twin_instance_content.command_name', 'get-digital-twin-instance-content'), help=u"""Retrieves the
+@digital_twin_instance_group.command(name=cli_util.override('iot.get_digital_twin_instance_content.command_name', 'get-digital-twin-instance-content'), help=u"""Retrieves the content associated with a digital twin instance identified by the specified OCID. The content can be retrieved only when a model is associated with the digital twin instance. \n[Command Reference](getDigitalTwinInstanceContent)""")
 @cli_util.option('--digital-twin-instance-id', required=True, help=u"""The [OCID] of digital twin instance.""")
 @cli_util.option('--should-include-metadata', type=click.BOOL, help=u"""If set to true , digital twin instance metadata is included in the response.""")
 @json_skeleton_utils.get_cli_json_input_option({})
@@ -2098,6 +2102,7 @@ def list_digital_twin_relationships(ctx, from_json, all_pages, page_size, iot_do
 @cli_util.option('--id', help=u"""Filter resources by [OCID]. Must be a valid OCID of the resource type.""")
 @cli_util.option('--display-name', help=u"""Filter resources whose display name matches the specified value.""")
 @cli_util.option('--lifecycle-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]), help=u"""Filter resources whose lifecycleState matches the specified value.""")
+@cli_util.option('--type', type=custom_types.CliCaseInsensitiveChoice(["STANDARD", "LIGHTWEIGHT"]), help=u"""Filter resources by type. Valid values are LIGHTWEIGHT or STANDARD.""")
 @cli_util.option('--limit', type=click.INT, help=u"""For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. For important details about how pagination works, see [List Pagination].""")
 @cli_util.option('--page', help=u"""For list pagination: The value of the opc-next-page response header from the previous \"List\" call. For important details on how pagination works, see [List Pagination].""")
 @cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Specifies sort order to use, either ASC (ascending) or DESC (descending).""")
@@ -2109,7 +2114,7 @@ def list_digital_twin_relationships(ctx, from_json, all_pages, page_size, iot_do
 @click.pass_context
 @json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'iot', 'class': 'IotDomainGroupCollection'})
 @cli_util.wrap_exceptions
-def list_iot_domain_groups(ctx, from_json, all_pages, page_size, compartment_id, id, display_name, lifecycle_state, limit, page, sort_order, sort_by):
+def list_iot_domain_groups(ctx, from_json, all_pages, page_size, compartment_id, id, display_name, lifecycle_state, type, limit, page, sort_order, sort_by):

     if all_pages and limit:
         raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
@@ -2121,6 +2126,8 @@ def list_iot_domain_groups(ctx, from_json, all_pages, page_size, compartment_id,
         kwargs['display_name'] = display_name
     if lifecycle_state is not None:
         kwargs['lifecycle_state'] = lifecycle_state
+    if type is not None:
+        kwargs['type'] = type
     if limit is not None:
         kwargs['limit'] = limit
     if page is not None:
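For the IoT change, the new `--type` flag is optional on create and is only written into the payload when supplied; on the list command it is passed through as a filter. A minimal sketch of the create payload, with a placeholder OCID; the `description` key is inferred from the `--description` option rather than shown in the hunk above.

```python
# Illustrative sketch only: keys mirror the _details dict built in create_iot_domain_group above.
create_domain_group_details = {
    'compartmentId': 'ocid1.compartment.oc1..exampleuniqueID',   # placeholder OCID
    'type': 'LIGHTWEIGHT',        # --type: LIGHTWEIGHT for dev/test (higher RTO), STANDARD for production
    'displayName': 'dev-domain-group',
    'description': 'Domain group for development and testing',
}
```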