@elastic/elasticsearch 8.18.2 → 8.19.0

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (104)
  1. package/index.d.ts +1 -0
  2. package/lib/api/api/async_search.d.ts +4 -4
  3. package/lib/api/api/autoscaling.d.ts +4 -4
  4. package/lib/api/api/bulk.d.ts +1 -1
  5. package/lib/api/api/cat.d.ts +26 -26
  6. package/lib/api/api/ccr.d.ts +13 -13
  7. package/lib/api/api/clear_scroll.d.ts +1 -1
  8. package/lib/api/api/close_point_in_time.d.ts +1 -1
  9. package/lib/api/api/cluster.d.ts +17 -17
  10. package/lib/api/api/connector.d.ts +30 -30
  11. package/lib/api/api/count.d.ts +1 -1
  12. package/lib/api/api/create.d.ts +1 -1
  13. package/lib/api/api/dangling_indices.d.ts +3 -3
  14. package/lib/api/api/delete.d.ts +1 -1
  15. package/lib/api/api/delete_by_query.d.ts +1 -1
  16. package/lib/api/api/delete_by_query_rethrottle.d.ts +1 -1
  17. package/lib/api/api/delete_script.d.ts +1 -1
  18. package/lib/api/api/enrich.d.ts +5 -5
  19. package/lib/api/api/eql.d.ts +3 -3
  20. package/lib/api/api/esql.d.ts +5 -5
  21. package/lib/api/api/esql.js +1 -1
  22. package/lib/api/api/esql.js.map +1 -1
  23. package/lib/api/api/exists.d.ts +1 -1
  24. package/lib/api/api/exists_source.d.ts +1 -1
  25. package/lib/api/api/explain.d.ts +1 -1
  26. package/lib/api/api/features.d.ts +2 -2
  27. package/lib/api/api/field_caps.d.ts +1 -1
  28. package/lib/api/api/fleet.d.ts +4 -4
  29. package/lib/api/api/get.d.ts +1 -1
  30. package/lib/api/api/get_script.d.ts +1 -1
  31. package/lib/api/api/get_script_context.d.ts +1 -1
  32. package/lib/api/api/get_script_languages.d.ts +1 -1
  33. package/lib/api/api/get_source.d.ts +1 -1
  34. package/lib/api/api/graph.d.ts +1 -1
  35. package/lib/api/api/health_report.d.ts +1 -1
  36. package/lib/api/api/ilm.d.ts +11 -11
  37. package/lib/api/api/index.d.ts +1 -1
  38. package/lib/api/api/indices.d.ts +94 -59
  39. package/lib/api/api/indices.js +115 -0
  40. package/lib/api/api/indices.js.map +1 -1
  41. package/lib/api/api/inference.d.ts +64 -43
  42. package/lib/api/api/inference.js +116 -2
  43. package/lib/api/api/inference.js.map +1 -1
  44. package/lib/api/api/info.d.ts +2 -2
  45. package/lib/api/api/ingest.d.ts +9 -9
  46. package/lib/api/api/knn_search.d.ts +1 -1
  47. package/lib/api/api/license.d.ts +7 -7
  48. package/lib/api/api/logstash.d.ts +3 -3
  49. package/lib/api/api/mget.d.ts +1 -1
  50. package/lib/api/api/migration.d.ts +3 -3
  51. package/lib/api/api/ml.d.ts +75 -75
  52. package/lib/api/api/msearch.d.ts +1 -1
  53. package/lib/api/api/msearch_template.d.ts +1 -1
  54. package/lib/api/api/mtermvectors.d.ts +1 -1
  55. package/lib/api/api/nodes.d.ts +7 -7
  56. package/lib/api/api/open_point_in_time.d.ts +1 -1
  57. package/lib/api/api/ping.d.ts +1 -1
  58. package/lib/api/api/profiling.d.ts +4 -4
  59. package/lib/api/api/put_script.d.ts +1 -1
  60. package/lib/api/api/query_rules.d.ts +8 -8
  61. package/lib/api/api/rank_eval.d.ts +1 -1
  62. package/lib/api/api/reindex.d.ts +1 -1
  63. package/lib/api/api/reindex_rethrottle.d.ts +1 -1
  64. package/lib/api/api/render_search_template.d.ts +1 -1
  65. package/lib/api/api/rollup.d.ts +8 -8
  66. package/lib/api/api/scripts_painless_execute.d.ts +1 -1
  67. package/lib/api/api/scroll.d.ts +1 -1
  68. package/lib/api/api/search.d.ts +1 -1
  69. package/lib/api/api/search_application.d.ts +10 -10
  70. package/lib/api/api/search_mvt.d.ts +1 -1
  71. package/lib/api/api/search_shards.d.ts +1 -1
  72. package/lib/api/api/search_template.d.ts +1 -1
  73. package/lib/api/api/searchable_snapshots.d.ts +4 -4
  74. package/lib/api/api/security.d.ts +64 -64
  75. package/lib/api/api/shutdown.d.ts +3 -3
  76. package/lib/api/api/simulate.d.ts +1 -1
  77. package/lib/api/api/slm.d.ts +9 -9
  78. package/lib/api/api/snapshot.d.ts +13 -13
  79. package/lib/api/api/sql.d.ts +6 -6
  80. package/lib/api/api/ssl.d.ts +1 -1
  81. package/lib/api/api/streams.d.ts +32 -0
  82. package/lib/api/api/streams.js +93 -0
  83. package/lib/api/api/streams.js.map +1 -0
  84. package/lib/api/api/synonyms.d.ts +7 -7
  85. package/lib/api/api/tasks.d.ts +3 -3
  86. package/lib/api/api/terms_enum.d.ts +1 -1
  87. package/lib/api/api/termvectors.d.ts +1 -1
  88. package/lib/api/api/text_structure.d.ts +4 -4
  89. package/lib/api/api/transform.d.ts +12 -12
  90. package/lib/api/api/update.d.ts +1 -1
  91. package/lib/api/api/update_by_query.d.ts +1 -1
  92. package/lib/api/api/update_by_query_rethrottle.d.ts +1 -1
  93. package/lib/api/api/watcher.d.ts +13 -13
  94. package/lib/api/api/xpack.d.ts +2 -2
  95. package/lib/api/index.d.ts +4 -0
  96. package/lib/api/index.js +14 -2
  97. package/lib/api/index.js.map +1 -1
  98. package/lib/api/types.d.ts +754 -173
  99. package/lib/api/typesWithBodyKey.d.ts +763 -173
  100. package/lib/client.js +9 -12
  101. package/lib/client.js.map +1 -1
  102. package/lib/helpers.js +1 -1
  103. package/lib/helpers.js.map +1 -1
  104. package/package.json +11 -11
@@ -8,190 +8,211 @@ export default class Inference {
8
8
  transport: Transport;
9
9
  constructor(transport: Transport);
10
10
  /**
11
- * Perform chat completion inference
12
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/chat-completion-inference-api.html | Elasticsearch API documentation}
11
+ * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API.
12
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/chat-completion-inference-api.html | Elasticsearch API documentation}
13
13
  */
14
14
  chatCompletionUnified(this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>;
15
15
  chatCompletionUnified(this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceChatCompletionUnifiedResponse, unknown>>;
16
16
  chatCompletionUnified(this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise<T.InferenceChatCompletionUnifiedResponse>;
17
17
  /**
18
18
  * Perform completion inference on the service
19
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
19
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation}
20
20
  */
21
21
  completion(this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceCompletionResponse>;
22
22
  completion(this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceCompletionResponse, unknown>>;
23
23
  completion(this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptions): Promise<T.InferenceCompletionResponse>;
24
24
  /**
25
25
  * Delete an inference endpoint
26
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/delete-inference-api.html | Elasticsearch API documentation}
26
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-inference-api.html | Elasticsearch API documentation}
27
27
  */
28
28
  delete(this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceDeleteResponse>;
29
29
  delete(this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceDeleteResponse, unknown>>;
30
30
  delete(this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise<T.InferenceDeleteResponse>;
31
31
  /**
32
32
  * Get an inference endpoint
33
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/get-inference-api.html | Elasticsearch API documentation}
33
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-inference-api.html | Elasticsearch API documentation}
34
34
  */
35
35
  get(this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceGetResponse>;
36
36
  get(this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceGetResponse, unknown>>;
37
37
  get(this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise<T.InferenceGetResponse>;
38
38
  /**
39
39
  * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
40
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
40
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation}
41
41
  */
42
42
  inference(this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceInferenceResponse>;
43
43
  inference(this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>;
44
44
  inference(this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>;
45
45
  /**
46
- * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
47
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html | Elasticsearch API documentation}
46
+ * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
47
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-inference-api.html | Elasticsearch API documentation}
48
48
  */
49
49
  put(this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>;
50
50
  put(this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutResponse, unknown>>;
51
51
  put(this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise<T.InferencePutResponse>;
52
52
  /**
53
- * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
54
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-alibabacloud-ai-search.html | Elasticsearch API documentation}
53
+ * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
54
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-alibabacloud-ai-search.html | Elasticsearch API documentation}
55
55
  */
56
56
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAlibabacloudResponse>;
57
57
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAlibabacloudResponse, unknown>>;
58
58
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise<T.InferencePutAlibabacloudResponse>;
59
59
  /**
60
- * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
61
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-amazon-bedrock.html | Elasticsearch API documentation}
60
+ * Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
61
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-amazon-bedrock.html | Elasticsearch API documentation}
62
62
  */
63
63
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAmazonbedrockResponse>;
64
64
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAmazonbedrockResponse, unknown>>;
65
65
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise<T.InferencePutAmazonbedrockResponse>;
66
66
  /**
67
- * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
68
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-anthropic.html | Elasticsearch API documentation}
67
+ * Create an Amazon SageMaker inference endpoint. Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service.
68
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker | Elasticsearch API documentation}
69
+ */
70
+ putAmazonsagemaker(this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAmazonsagemakerResponse>;
71
+ putAmazonsagemaker(this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAmazonsagemakerResponse, unknown>>;
72
+ putAmazonsagemaker(this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise<T.InferencePutAmazonsagemakerResponse>;
73
+ /**
74
+ * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service.
75
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-anthropic.html | Elasticsearch API documentation}
69
76
  */
70
77
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAnthropicResponse>;
71
78
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAnthropicResponse, unknown>>;
72
79
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise<T.InferencePutAnthropicResponse>;
73
80
  /**
74
- * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
75
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-ai-studio.html | Elasticsearch API documentation}
81
+ * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service.
82
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-azure-ai-studio.html | Elasticsearch API documentation}
76
83
  */
77
84
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureaistudioResponse>;
78
85
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAzureaistudioResponse, unknown>>;
79
86
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise<T.InferencePutAzureaistudioResponse>;
80
87
  /**
81
- * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
82
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-openai.html | Elasticsearch API documentation}
88
+ * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
89
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-azure-openai.html | Elasticsearch API documentation}
83
90
  */
84
91
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureopenaiResponse>;
85
92
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAzureopenaiResponse, unknown>>;
86
93
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutAzureopenaiResponse>;
87
94
  /**
88
- * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
89
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-cohere.html | Elasticsearch API documentation}
95
+ * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service.
96
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-cohere.html | Elasticsearch API documentation}
90
97
  */
91
98
  putCohere(this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutCohereResponse>;
92
99
  putCohere(this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutCohereResponse, unknown>>;
93
100
  putCohere(this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptions): Promise<T.InferencePutCohereResponse>;
101
+ /**
102
+ * Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "<some api key>" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes. Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests.
103
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation}
104
+ */
105
+ putCustom(this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutCustomResponse>;
106
+ putCustom(this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutCustomResponse, unknown>>;
107
+ putCustom(this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptions): Promise<T.InferencePutCustomResponse>;
108
+ /**
109
+ * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service.
110
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-deepseek.html | Elasticsearch API documentation}
111
+ */
112
+ putDeepseek(this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutDeepseekResponse>;
113
+ putDeepseek(this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutDeepseekResponse, unknown>>;
114
+ putDeepseek(this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise<T.InferencePutDeepseekResponse>;
94
115
  /**
95
116
  * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
96
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elasticsearch.html | Elasticsearch API documentation}
117
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-elasticsearch.html | Elasticsearch API documentation}
97
118
  */
98
119
  putElasticsearch(this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutElasticsearchResponse>;
99
120
  putElasticsearch(this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutElasticsearchResponse, unknown>>;
100
121
  putElasticsearch(this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise<T.InferencePutElasticsearchResponse>;
101
122
  /**
102
123
  * Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the `elser` service. You can also deploy ELSER by using the Elasticsearch inference integration. > info > Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings. The API request will automatically download and deploy the ELSER model if it isn't already downloaded. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
103
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elser.html | Elasticsearch API documentation}
124
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-elser.html | Elasticsearch API documentation}
104
125
  */
105
126
  putElser(this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutElserResponse>;
106
127
  putElser(this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutElserResponse, unknown>>;
107
128
  putElser(this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptions): Promise<T.InferencePutElserResponse>;
108
129
  /**
109
- * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
110
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-ai-studio.html | Elasticsearch API documentation}
130
+ * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service.
131
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-google-ai-studio.html | Elasticsearch API documentation}
111
132
  */
112
133
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGoogleaistudioResponse>;
113
134
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutGoogleaistudioResponse, unknown>>;
114
135
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise<T.InferencePutGoogleaistudioResponse>;
115
136
  /**
116
- * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
117
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-vertex-ai.html | Elasticsearch API documentation}
137
+ * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service.
138
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-google-vertex-ai.html | Elasticsearch API documentation}
118
139
  */
119
140
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGooglevertexaiResponse>;
120
141
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutGooglevertexaiResponse, unknown>>;
121
142
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutGooglevertexaiResponse>;
122
143
  /**
123
- * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
124
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-hugging-face.html | Elasticsearch API documentation}
144
+ * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use. For Elastic's `text_embedding` task: The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for `text_embedding` task: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` For Elastic's `chat_completion` and `completion` tasks: The selected model must support the `Text Generation` task and expose OpenAI API. HuggingFace supports both serverless and dedicated endpoints for `Text Generation`. When creating dedicated endpoint select the `Text Generation` task. After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes `/v1/chat/completions` part in URL. Then, copy the full endpoint URL for use. Recommended models for `chat_completion` and `completion` tasks: * `Mistral-7B-Instruct-v0.2` * `QwQ-32B` * `Phi-3-mini-128k-instruct` For Elastic's `rerank` task: The selected model must support the `sentence-ranking` task and expose OpenAI API. HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` so far. After the endpoint is initialized, copy the full endpoint URL for use. Tested models for `rerank` task: * `bge-reranker-base` * `jina-reranker-v1-turbo-en-GGUF`
145
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-hugging-face.html | Elasticsearch API documentation}
125
146
  */
126
147
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutHuggingFaceResponse>;
127
148
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutHuggingFaceResponse, unknown>>;
128
149
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise<T.InferencePutHuggingFaceResponse>;
129
150
  /**
130
- * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
131
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-jinaai.html | Elasticsearch API documentation}
151
+ * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>.
152
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-jinaai.html | Elasticsearch API documentation}
132
153
  */
133
154
  putJinaai(this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutJinaaiResponse>;
134
155
  putJinaai(this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutJinaaiResponse, unknown>>;
135
156
  putJinaai(this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutJinaaiResponse>;
136
157
  /**
137
- * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
138
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/{brnach}/infer-service-mistral.html | Elasticsearch API documentation}
158
+ * Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service.
159
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-mistral.html | Elasticsearch API documentation}
139
160
  */
140
161
  putMistral(this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutMistralResponse>;
141
162
  putMistral(this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutMistralResponse, unknown>>;
142
163
  putMistral(this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptions): Promise<T.InferencePutMistralResponse>;
143
164
  /**
144
- * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
145
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-openai.html | Elasticsearch API documentation}
165
+ * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
166
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-openai.html | Elasticsearch API documentation}
146
167
  */
147
168
  putOpenai(this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutOpenaiResponse>;
148
169
  putOpenai(this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutOpenaiResponse, unknown>>;
149
170
  putOpenai(this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutOpenaiResponse>;
150
171
  /**
151
172
  * Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
152
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-voyageai.html | Elasticsearch API documentation}
173
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-voyageai.html | Elasticsearch API documentation}
153
174
  */
154
175
  putVoyageai(this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutVoyageaiResponse>;
155
176
  putVoyageai(this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutVoyageaiResponse, unknown>>;
156
177
  putVoyageai(this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutVoyageaiResponse>;
157
178
  /**
158
- * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
159
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-watsonx-ai.html | Elasticsearch API documentation}
179
+ * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
180
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-watsonx-ai.html | Elasticsearch API documentation}
160
181
  */
161
182
  putWatsonx(this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutWatsonxResponse>;
162
183
  putWatsonx(this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutWatsonxResponse, unknown>>;
163
184
  putWatsonx(this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise<T.InferencePutWatsonxResponse>;
164
185
  /**
165
- * Perform rereanking inference on the service
166
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
186
+ * Perform reranking inference on the service
187
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation}
167
188
  */
168
189
  rerank(this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceRerankResponse>;
169
190
  rerank(this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceRerankResponse, unknown>>;
170
191
  rerank(this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptions): Promise<T.InferenceRerankResponse>;
171
192
  /**
172
193
  * Perform sparse embedding inference on the service
173
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
194
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation}
174
195
  */
175
196
  sparseEmbedding(this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceSparseEmbeddingResponse>;
176
197
  sparseEmbedding(this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceSparseEmbeddingResponse, unknown>>;
177
198
  sparseEmbedding(this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise<T.InferenceSparseEmbeddingResponse>;
178
199
  /**
  * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/stream-inference-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/stream-inference-api.html | Elasticsearch API documentation}
  */
  streamCompletion(this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceStreamCompletionResponse>;
  streamCompletion(this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceStreamCompletionResponse, unknown>>;
  streamCompletion(this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise<T.InferenceStreamCompletionResponse>;
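Since the server delivers the answer incrementally, one way to consume it from this client is the transport's `asStream` request option plus an async iterator. A hedged sketch: the endpoint id and prompt are hypothetical, and the cast reflects that the typed response is replaced by a raw stream in this mode.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Request the raw response stream and print chunks as they arrive;
// 'my-chat-endpoint' is a hypothetical completion endpoint.
const stream = await client.inference.streamCompletion(
  { inference_id: 'my-chat-endpoint', input: 'Say hello in three languages' },
  { asStream: true }
)
for await (const chunk of stream as unknown as AsyncIterable<Buffer>) {
  process.stdout.write(chunk.toString())
}
```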
  /**
  * Perform text embedding inference on the service
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation}
  */
  textEmbedding(this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceTextEmbeddingResponse>;
  textEmbedding(this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceTextEmbeddingResponse, unknown>>;
  textEmbedding(this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise<T.InferenceTextEmbeddingResponse>;
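And a matching dense-embedding sketch under the same assumptions (hypothetical node URL and endpoint id):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Dense text embeddings for one or more strings;
// 'my-e5-endpoint' is a hypothetical endpoint id.
const response = await client.inference.textEmbedding({
  inference_id: 'my-e5-endpoint',
  input: ['quick brown fox', 'lazy dog']
})
console.log(response)
```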
  /**
  * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-inference-api.html | Elasticsearch API documentation}
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-inference-api.html | Elasticsearch API documentation}
  */
  update(this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceUpdateResponse>;
  update(this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceUpdateResponse, unknown>>;
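A hedged sketch of an update call, following the description above (only `task_settings`, secrets inside `service_settings`, and `num_allocations` are updatable). The endpoint id is hypothetical, and the flattened body shape mirrors this file's put* helpers as an assumption; the linked update-inference-api page is authoritative for the request format.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Assumption: num_allocations travels inside service_settings;
// 'my-elser-endpoint' is a hypothetical endpoint id.
const response = await client.inference.update({
  inference_id: 'my-elser-endpoint',
  service_settings: { num_allocations: 2 }
})
console.log(response)
```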
@@ -166,7 +166,7 @@ class Inference {
  }
  async inference(params, options) {
  const acceptedPath = ['task_type', 'inference_id'];
- const acceptedBody = ['query', 'input', 'task_settings'];
+ const acceptedBody = ['query', 'input', 'input_type', 'task_settings'];
  const querystring = {};
  // @ts-expect-error
  const userBody = params === null || params === void 0 ? void 0 : params.body;
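Given the serialization loop in this method, any parameter outside `acceptedBody` and `acceptedPath` is routed to the query string; adding `input_type` to `acceptedBody` means it is now sent in the request body instead. A sketch of a call that exercises the new key; the endpoint id is hypothetical and the `'search'` value is an assumption that depends on the backing service:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// input_type is now an accepted body key, so it is serialized into
// the request body rather than the query string.
const response = await client.inference.inference({
  task_type: 'text_embedding',
  inference_id: 'my-embedding-endpoint', // hypothetical id
  input: ['find similar shirts'],
  input_type: 'search' // assumption: valid values depend on the service
})
console.log(response)
```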
@@ -325,6 +325,44 @@ class Inference {
  };
  return await this.transport.request({ path, method, querystring, body, meta }, options);
  }
+ async putAmazonsagemaker(params, options) {
+ const acceptedPath = ['task_type', 'amazonsagemaker_inference_id'];
+ const acceptedBody = ['chunking_settings', 'service', 'service_settings', 'task_settings'];
+ const querystring = {};
+ // @ts-expect-error
+ const userBody = params === null || params === void 0 ? void 0 : params.body;
+ let body;
+ if (typeof userBody === 'string') {
+ body = userBody;
+ }
+ else {
+ body = userBody != null ? { ...userBody } : undefined;
+ }
+ for (const key in params) {
+ if (acceptedBody.includes(key)) {
+ body = body !== null && body !== void 0 ? body : {};
+ // @ts-expect-error
+ body[key] = params[key];
+ }
+ else if (acceptedPath.includes(key)) {
+ continue;
+ }
+ else if (key !== 'body') {
+ // @ts-expect-error
+ querystring[key] = params[key];
+ }
+ }
+ const method = 'PUT';
+ const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonsagemaker_inference_id.toString())}`;
+ const meta = {
+ name: 'inference.put_amazonsagemaker',
+ pathParts: {
+ task_type: params.task_type,
+ amazonsagemaker_inference_id: params.amazonsagemaker_inference_id
+ }
+ };
+ return await this.transport.request({ path, method, querystring, body, meta }, options);
+ }
  async putAnthropic(params, options) {
  const acceptedPath = ['task_type', 'anthropic_inference_id'];
  const acceptedBody = ['chunking_settings', 'service', 'service_settings', 'task_settings'];
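In the new putAmazonsagemaker method above, `task_type` and `amazonsagemaker_inference_id` become the two path segments of `PUT /_inference/{task_type}/{id}`, while `chunking_settings`, `service`, `service_settings`, and `task_settings` are copied into the body. A hedged sketch of a call; the service identifier and every `service_settings` field below are assumptions based on the Amazon SageMaker service documentation, not confirmed by this diff:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

const response = await client.inference.putAmazonsagemaker({
  task_type: 'text_embedding',
  amazonsagemaker_inference_id: 'my-sagemaker-endpoint', // hypothetical id
  service: 'amazon_sagemaker', // assumption: service identifier
  service_settings: {
    // Assumptions: credential and endpoint fields per the SageMaker docs
    access_key: 'AWS_ACCESS_KEY_ID',
    secret_key: 'AWS_SECRET_ACCESS_KEY',
    endpoint_name: 'my-sagemaker-model-endpoint',
    region: 'us-east-1'
  }
})
console.log(response)
```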
@@ -477,6 +515,82 @@ class Inference {
  };
  return await this.transport.request({ path, method, querystring, body, meta }, options);
  }
+ async putCustom(params, options) {
+ const acceptedPath = ['task_type', 'custom_inference_id'];
+ const acceptedBody = ['chunking_settings', 'service', 'service_settings', 'task_settings'];
+ const querystring = {};
+ // @ts-expect-error
+ const userBody = params === null || params === void 0 ? void 0 : params.body;
+ let body;
+ if (typeof userBody === 'string') {
+ body = userBody;
+ }
+ else {
+ body = userBody != null ? { ...userBody } : undefined;
+ }
+ for (const key in params) {
+ if (acceptedBody.includes(key)) {
+ body = body !== null && body !== void 0 ? body : {};
+ // @ts-expect-error
+ body[key] = params[key];
+ }
+ else if (acceptedPath.includes(key)) {
+ continue;
+ }
+ else if (key !== 'body') {
+ // @ts-expect-error
+ querystring[key] = params[key];
+ }
+ }
+ const method = 'PUT';
+ const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.custom_inference_id.toString())}`;
+ const meta = {
+ name: 'inference.put_custom',
+ pathParts: {
+ task_type: params.task_type,
+ custom_inference_id: params.custom_inference_id
+ }
+ };
+ return await this.transport.request({ path, method, querystring, body, meta }, options);
+ }
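A hedged sketch for registering a custom endpoint with the method above. Everything inside `service_settings` (upstream URL, request template, response parser path, secret parameters) is an assumption about the shape of the 'custom' service and should be verified against the 8.19 documentation:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

const response = await client.inference.putCustom({
  task_type: 'text_embedding',
  custom_inference_id: 'my-custom-endpoint', // hypothetical id
  service: 'custom', // assumption: service identifier
  service_settings: {
    // Assumptions: an upstream URL, a JSON request template, and a JSON
    // path telling Elasticsearch where embeddings live in the response.
    url: 'https://example.com/v1/embeddings',
    request: '{"input": ${input}}',
    response: { json_parser: { text_embeddings: '$.data[*].embedding' } },
    secret_parameters: { api_key: 'REDACTED' }
  }
})
console.log(response)
```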
+ async putDeepseek(params, options) {
+ const acceptedPath = ['task_type', 'deepseek_inference_id'];
+ const acceptedBody = ['chunking_settings', 'service', 'service_settings'];
+ const querystring = {};
+ // @ts-expect-error
+ const userBody = params === null || params === void 0 ? void 0 : params.body;
+ let body;
+ if (typeof userBody === 'string') {
+ body = userBody;
+ }
+ else {
+ body = userBody != null ? { ...userBody } : undefined;
+ }
+ for (const key in params) {
+ if (acceptedBody.includes(key)) {
+ body = body !== null && body !== void 0 ? body : {};
+ // @ts-expect-error
+ body[key] = params[key];
+ }
+ else if (acceptedPath.includes(key)) {
+ continue;
+ }
+ else if (key !== 'body') {
+ // @ts-expect-error
+ querystring[key] = params[key];
+ }
+ }
+ const method = 'PUT';
+ const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.deepseek_inference_id.toString())}`;
+ const meta = {
+ name: 'inference.put_deepseek',
+ pathParts: {
+ task_type: params.task_type,
+ deepseek_inference_id: params.deepseek_inference_id
+ }
+ };
+ return await this.transport.request({ path, method, querystring, body, meta }, options);
+ }
  async putElasticsearch(params, options) {
  const acceptedPath = ['task_type', 'elasticsearch_inference_id'];
  const acceptedBody = ['chunking_settings', 'service', 'service_settings', 'task_settings'];
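Note that putDeepseek accepts no `task_settings` in its body, per its `acceptedBody` list above. A hedged sketch of a call; the task type, service identifier, and `service_settings` fields are assumptions based on the DeepSeek service documentation, and the key and model values are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

const response = await client.inference.putDeepseek({
  task_type: 'chat_completion', // assumption: supported task type
  deepseek_inference_id: 'my-deepseek-endpoint', // hypothetical id
  service: 'deepseek', // assumption: service identifier
  service_settings: {
    api_key: 'DEEPSEEK_API_KEY', // placeholder secret
    model_id: 'deepseek-chat' // assumption: model identifier field
  }
})
console.log(response)
```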
@@ -631,7 +745,7 @@ class Inference {
  }
  async putHuggingFace(params, options) {
  const acceptedPath = ['task_type', 'huggingface_inference_id'];
- const acceptedBody = ['chunking_settings', 'service', 'service_settings'];
+ const acceptedBody = ['chunking_settings', 'service', 'service_settings', 'task_settings'];
  const querystring = {};
  // @ts-expect-error
  const userBody = params === null || params === void 0 ? void 0 : params.body;
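With `task_settings` now in the accepted body keys, Hugging Face endpoints can carry per-task options at creation time. A hedged sketch; the rerank task type, the `task_settings` keys, and the URL and key values below are all assumptions, not confirmed by this diff:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

const response = await client.inference.putHuggingFace({
  task_type: 'rerank', // assumption: a task type that uses task_settings
  huggingface_inference_id: 'my-hf-rerank-endpoint', // hypothetical id
  service: 'hugging_face',
  service_settings: {
    url: 'https://example.com/models/my-reranker', // placeholder URL
    api_key: 'HF_API_KEY' // placeholder secret
  },
  task_settings: { return_documents: true, top_n: 3 } // assumption: keys
})
console.log(response)
```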