@elastic/elasticsearch 9.0.1 → 9.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.ts CHANGED
@@ -8,6 +8,6 @@ import SniffingTransport from './lib/sniffingTransport'
8
8
 
9
9
  export * from '@elastic/transport'
10
10
  export * as estypes from './lib/api/types'
11
- export * as estypesWithBody from './lib/api/typesWithBodyKey'
12
11
  export { Client, SniffingTransport }
13
12
  export type { ClientOptions, NodeOptions } from './lib/client'
13
+ export * as helpers from './lib/helpers'
@@ -17,7 +17,7 @@ export default class Inference {
17
17
  }>;
18
18
  constructor(transport: Transport);
19
19
  /**
20
- * Perform chat completion inference
20
+ * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
21
21
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-unified-inference | Elasticsearch API documentation}
22
22
  */
23
23
  chatCompletionUnified(this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferenceChatCompletionUnifiedResponse>;
@@ -52,49 +52,49 @@ export default class Inference {
52
52
  inference(this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferenceInferenceResponse, unknown>>;
53
53
  inference(this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise<T.InferenceInferenceResponse>;
54
54
  /**
55
- * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
55
+ * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
56
56
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put | Elasticsearch API documentation}
57
57
  */
58
58
  put(this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>;
59
59
  put(this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutResponse, unknown>>;
60
60
  put(this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise<T.InferencePutResponse>;
61
61
  /**
62
- * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
62
+ * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
63
63
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation}
64
64
  */
65
65
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAlibabacloudResponse>;
66
66
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAlibabacloudResponse, unknown>>;
67
67
  putAlibabacloud(this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise<T.InferencePutAlibabacloudResponse>;
68
68
  /**
69
- * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
69
+ * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
70
70
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation}
71
71
  */
72
72
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAmazonbedrockResponse>;
73
73
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAmazonbedrockResponse, unknown>>;
74
74
  putAmazonbedrock(this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise<T.InferencePutAmazonbedrockResponse>;
75
75
  /**
76
- * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
76
+ * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service.
77
77
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-anthropic | Elasticsearch API documentation}
78
78
  */
79
79
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAnthropicResponse>;
80
80
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAnthropicResponse, unknown>>;
81
81
  putAnthropic(this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise<T.InferencePutAnthropicResponse>;
82
82
  /**
83
- * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
83
+ * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service.
84
84
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation}
85
85
  */
86
86
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureaistudioResponse>;
87
87
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAzureaistudioResponse, unknown>>;
88
88
  putAzureaistudio(this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise<T.InferencePutAzureaistudioResponse>;
89
89
  /**
90
- * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
90
+ * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
91
91
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureopenai | Elasticsearch API documentation}
92
92
  */
93
93
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutAzureopenaiResponse>;
94
94
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutAzureopenaiResponse, unknown>>;
95
95
  putAzureopenai(this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutAzureopenaiResponse>;
96
96
  /**
97
- * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
97
+ * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service.
98
98
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-cohere | Elasticsearch API documentation}
99
99
  */
100
100
  putCohere(this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutCohereResponse>;
@@ -115,42 +115,42 @@ export default class Inference {
115
115
  putElser(this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutElserResponse, unknown>>;
116
116
  putElser(this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise<T.InferencePutElserResponse>;
117
117
  /**
118
- * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
118
+ * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service.
119
119
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation}
120
120
  */
121
121
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGoogleaistudioResponse>;
122
122
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutGoogleaistudioResponse, unknown>>;
123
123
  putGoogleaistudio(this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise<T.InferencePutGoogleaistudioResponse>;
124
124
  /**
125
- * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
125
+ * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service.
126
126
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation}
127
127
  */
128
128
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutGooglevertexaiResponse>;
129
129
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutGooglevertexaiResponse, unknown>>;
130
130
  putGooglevertexai(this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutGooglevertexaiResponse>;
131
131
  /**
132
- * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
132
+ * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small`
133
133
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-hugging-face | Elasticsearch API documentation}
134
134
  */
135
135
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutHuggingFaceResponse>;
136
136
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutHuggingFaceResponse, unknown>>;
137
137
  putHuggingFace(this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise<T.InferencePutHuggingFaceResponse>;
138
138
  /**
139
- * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
139
+ * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to <https://jina.ai/reranker>. To review the available `text_embedding` models, refer to the <https://jina.ai/embeddings/>.
140
140
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-jinaai | Elasticsearch API documentation}
141
141
  */
142
142
  putJinaai(this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutJinaaiResponse>;
143
143
  putJinaai(this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutJinaaiResponse, unknown>>;
144
144
  putJinaai(this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutJinaaiResponse>;
145
145
  /**
146
- * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
146
+ * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service.
147
147
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-mistral | Elasticsearch API documentation}
148
148
  */
149
149
  putMistral(this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutMistralResponse>;
150
150
  putMistral(this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutMistralResponse, unknown>>;
151
151
  putMistral(this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise<T.InferencePutMistralResponse>;
152
152
  /**
153
- * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
153
+ * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.
154
154
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-openai | Elasticsearch API documentation}
155
155
  */
156
156
  putOpenai(this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutOpenaiResponse>;
@@ -164,7 +164,7 @@ export default class Inference {
164
164
  putVoyageai(this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutVoyageaiResponse, unknown>>;
165
165
  putVoyageai(this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise<T.InferencePutVoyageaiResponse>;
166
166
  /**
167
- * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
167
+ * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
168
168
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-watsonx | Elasticsearch API documentation}
169
169
  */
170
170
  putWatsonx(this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutWatsonxResponse>;
@@ -185,14 +185,14 @@ export default class Ml {
185
185
  getDataFrameAnalytics(this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>;
186
186
  getDataFrameAnalytics(this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>;
187
187
  /**
188
- * Get data frame analytics jobs usage info.
188
+ * Get data frame analytics job stats.
189
189
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation}
190
190
  */
191
191
  getDataFrameAnalyticsStats(this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>;
192
192
  getDataFrameAnalyticsStats(this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>;
193
193
  getDataFrameAnalyticsStats(this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>;
194
194
  /**
195
- * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
195
+ * Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
196
196
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation}
197
197
  */
198
198
  getDatafeedStats(this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>;
@@ -220,7 +220,7 @@ export default class Ml {
220
220
  getInfluencers(this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetInfluencersResponse, unknown>>;
221
221
  getInfluencers(this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<T.MlGetInfluencersResponse>;
222
222
  /**
223
- * Get anomaly detection jobs usage info.
223
+ * Get anomaly detection job stats.
224
224
  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-job-stats | Elasticsearch API documentation}
225
225
  */
226
226
  getJobStats(this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>;
@@ -2639,8 +2639,8 @@ export interface SearchRequest extends RequestBase {
2639
2639
  * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method.
2640
2640
  * * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method.
2641
2641
  * * `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method.
2642
- * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
2643
- * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order. */
2642
+ * * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
2643
+ * * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order. */
2644
2644
  preference?: string;
2645
2645
  /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.
2646
2646
  * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).
@@ -3320,7 +3320,7 @@ export interface SearchInnerHits {
3320
3320
  ignore_unmapped?: boolean;
3321
3321
  script_fields?: Record<Field, ScriptField>;
3322
3322
  seq_no_primary_term?: boolean;
3323
- fields?: Fields;
3323
+ fields?: Field[];
3324
3324
  /** How the inner hits should be sorted per `inner_hits`.
3325
3325
  * By default, inner hits are sorted by score. */
3326
3326
  sort?: Sort;
@@ -7160,12 +7160,18 @@ export interface AggregationsWeightedAverageValue {
7160
7160
  export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
7161
7161
  }
7162
7162
  export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer;
7163
+ export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase {
7164
+ type: 'apostrophe';
7165
+ }
7163
7166
  export interface AnalysisArabicAnalyzer {
7164
7167
  type: 'arabic';
7165
7168
  stopwords?: AnalysisStopWords;
7166
7169
  stopwords_path?: string;
7167
7170
  stem_exclusion?: string[];
7168
7171
  }
7172
+ export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase {
7173
+ type: 'arabic_normalization';
7174
+ }
7169
7175
  export interface AnalysisArmenianAnalyzer {
7170
7176
  type: 'armenian';
7171
7177
  stopwords?: AnalysisStopWords;
@@ -7174,6 +7180,7 @@ export interface AnalysisArmenianAnalyzer {
7174
7180
  }
7175
7181
  export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
7176
7182
  type: 'asciifolding';
7183
+ /** If `true`, emit both original tokens and folded tokens. Defaults to `false`. */
7177
7184
  preserve_original?: SpecUtilsStringified<boolean>;
7178
7185
  }
7179
7186
  export interface AnalysisBasqueAnalyzer {
@@ -7225,29 +7232,63 @@ export interface AnalysisCjkAnalyzer {
7225
7232
  stopwords?: AnalysisStopWords;
7226
7233
  stopwords_path?: string;
7227
7234
  }
7235
+ export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana';
7236
+ export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase {
7237
+ type: 'cjk_bigram';
7238
+ /** Array of character scripts for which to disable bigrams. */
7239
+ ignored_scripts?: AnalysisCjkBigramIgnoredScript[];
7240
+ /** If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`. */
7241
+ output_unigrams?: boolean;
7242
+ }
7243
+ export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase {
7244
+ type: 'cjk_width';
7245
+ }
7246
+ export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase {
7247
+ type: 'classic';
7248
+ }
7228
7249
  export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
7229
7250
  type: 'classic';
7230
7251
  max_token_length?: integer;
7231
7252
  }
7232
7253
  export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
7233
7254
  type: 'common_grams';
7255
+ /** A list of tokens. The filter generates bigrams for these tokens.
7256
+ * Either this or the `common_words_path` parameter is required. */
7234
7257
  common_words?: string[];
7258
+ /** Path to a file containing a list of tokens. The filter generates bigrams for these tokens.
7259
+ * This path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break.
7260
+ * Either this or the `common_words` parameter is required. */
7235
7261
  common_words_path?: string;
7262
+ /** If `true`, matches for common words matching are case-insensitive. Defaults to `false`. */
7236
7263
  ignore_case?: boolean;
7264
+ /** If `true`, the filter excludes the following tokens from the output:
7265
+ * - Unigrams for common words
7266
+ * - Unigrams for terms followed by common words
7267
+ * Defaults to `false`. We recommend enabling this parameter for search analyzers. */
7237
7268
  query_mode?: boolean;
7238
7269
  }
7239
7270
  export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
7240
- hyphenation_patterns_path?: string;
7271
+ /** Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`. */
7241
7272
  max_subword_size?: integer;
7273
+ /** Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`. */
7242
7274
  min_subword_size?: integer;
7275
+ /** Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`. */
7243
7276
  min_word_size?: integer;
7277
+ /** If `true`, only include the longest matching subword. Defaults to `false`. */
7244
7278
  only_longest_match?: boolean;
7279
+ /** A list of subwords to look for in the token stream. If found, the subword is included in the token output.
7280
+ * Either this parameter or `word_list_path` must be specified. */
7245
7281
  word_list?: string[];
7282
+ /** Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output.
7283
+ * This path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.
7284
+ * Either this parameter or `word_list` must be specified. */
7246
7285
  word_list_path?: string;
7247
7286
  }
7248
7287
  export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
7249
7288
  type: 'condition';
7289
+ /** Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided. */
7250
7290
  filter: string[];
7291
+ /** Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token. */
7251
7292
  script: Script | ScriptSource;
7252
7293
  }
7253
7294
  export interface AnalysisCustomAnalyzer {
@@ -7274,10 +7315,15 @@ export interface AnalysisDanishAnalyzer {
7274
7315
  stopwords?: AnalysisStopWords;
7275
7316
  stopwords_path?: string;
7276
7317
  }
7318
+ export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase {
7319
+ type: 'decimal_digit';
7320
+ }
7277
7321
  export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity';
7278
7322
  export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
7279
7323
  type: 'delimited_payload';
7324
+ /** Character used to separate tokens from payloads. Defaults to `|`. */
7280
7325
  delimiter?: string;
7326
+ /** Data type for the stored payload. */
7281
7327
  encoding?: AnalysisDelimitedPayloadEncoding;
7282
7328
  }
7283
7329
  export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
@@ -7292,9 +7338,13 @@ export interface AnalysisDutchAnalyzer {
7292
7338
  export type AnalysisEdgeNGramSide = 'front' | 'back';
7293
7339
  export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
7294
7340
  type: 'edge_ngram';
7341
+ /** Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`. */
7295
7342
  max_gram?: integer;
7343
+ /** Minimum character length of a gram. Defaults to `1`. */
7296
7344
  min_gram?: integer;
7345
+ /** Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`. */
7297
7346
  side?: AnalysisEdgeNGramSide;
7347
+ /** Emits original token when set to `true`. Defaults to `false`. */
7298
7348
  preserve_original?: SpecUtilsStringified<boolean>;
7299
7349
  }
7300
7350
  export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
@@ -7306,8 +7356,16 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
7306
7356
  }
7307
7357
  export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
7308
7358
  type: 'elision';
7359
+ /** List of elisions to remove.
7360
+ * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.
7361
+ * For custom `elision` filters, either this parameter or `articles_path` must be specified. */
7309
7362
  articles?: string[];
7363
+ /** Path to a file that contains a list of elisions to remove.
7364
+ * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break.
7365
+ * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.
7366
+ * For custom `elision` filters, either this parameter or `articles` must be specified. */
7310
7367
  articles_path?: string;
7368
+ /** If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`. */
7311
7369
  articles_case?: SpecUtilsStringified<boolean>;
7312
7370
  }
7313
7371
  export interface AnalysisEnglishAnalyzer {
@@ -7338,7 +7396,9 @@ export interface AnalysisFingerprintAnalyzer {
7338
7396
  }
7339
7397
  export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
7340
7398
  type: 'fingerprint';
7399
+ /** Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output. */
7341
7400
  max_output_size?: integer;
7401
+ /** Character to use to concatenate the token stream input. Defaults to a space. */
7342
7402
  separator?: string;
7343
7403
  }
7344
7404
  export interface AnalysisFinnishAnalyzer {
@@ -7347,6 +7407,9 @@ export interface AnalysisFinnishAnalyzer {
7347
7407
  stopwords_path?: string;
7348
7408
  stem_exclusion?: string[];
7349
7409
  }
7410
+ export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase {
7411
+ type: 'flatten_graph';
7412
+ }
7350
7413
  export interface AnalysisFrenchAnalyzer {
7351
7414
  type: 'french';
7352
7415
  stopwords?: AnalysisStopWords;
@@ -7365,6 +7428,9 @@ export interface AnalysisGermanAnalyzer {
7365
7428
  stopwords_path?: string;
7366
7429
  stem_exclusion?: string[];
7367
7430
  }
7431
+ export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase {
7432
+ type: 'german_normalization';
7433
+ }
7368
7434
  export interface AnalysisGreekAnalyzer {
7369
7435
  type: 'greek';
7370
7436
  stopwords?: AnalysisStopWords;
@@ -7376,6 +7442,9 @@ export interface AnalysisHindiAnalyzer {
7376
7442
  stopwords_path?: string;
7377
7443
  stem_exclusion?: string[];
7378
7444
  }
7445
+ export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase {
7446
+ type: 'hindi_normalization';
7447
+ }
7379
7448
  export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
7380
7449
  type: 'html_strip';
7381
7450
  escaped_tags?: string[];
@@ -7388,13 +7457,31 @@ export interface AnalysisHungarianAnalyzer {
7388
7457
  }
7389
7458
  export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
7390
7459
  type: 'hunspell';
7460
+ /** If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`. */
7391
7461
  dedup?: boolean;
7462
+ /** One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary.
7463
+ * By default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/<locale>` directory specified using the `lang`, `language`, or `locale` parameter. */
7392
7464
  dictionary?: string;
7465
+ /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. */
7393
7466
  locale: string;
7467
+ /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.
7468
+ * @alias locale */
7469
+ lang: string;
7470
+ /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.
7471
+ * @alias locale */
7472
+ language: string;
7473
+ /** If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`. */
7394
7474
  longest_only?: boolean;
7395
7475
  }
7396
7476
  export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
7397
7477
  type: 'hyphenation_decompounder';
7478
+ /** Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file.
7479
+ * This path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported. */
7480
+ hyphenation_patterns_path: string;
7481
+ /** If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`. */
7482
+ no_sub_matches?: boolean;
7483
+ /** If `true`, do not allow overlapping tokens. Defaults to `false`. */
7484
+ no_overlapping_matches?: boolean;
7398
7485
  }
7399
7486
  export interface AnalysisIcuAnalyzer {
7400
7487
  type: 'icu_analyzer';
@@ -7428,6 +7515,7 @@ export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBa
7428
7515
  type: 'icu_normalizer';
7429
7516
  mode?: AnalysisIcuNormalizationMode;
7430
7517
  name?: AnalysisIcuNormalizationType;
7518
+ unicode_set_filter?: string;
7431
7519
  }
7432
7520
  export type AnalysisIcuNormalizationMode = 'decompose' | 'compose';
7433
7521
  export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase {
@@ -7445,6 +7533,9 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase
7445
7533
  dir?: AnalysisIcuTransformDirection;
7446
7534
  id: string;
7447
7535
  }
7536
+ export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase {
7537
+ type: 'indic_normalization';
7538
+ }
7448
7539
  export interface AnalysisIndonesianAnalyzer {
7449
7540
  type: 'indonesian';
7450
7541
  stopwords?: AnalysisStopWords;
@@ -7463,19 +7554,31 @@ export interface AnalysisItalianAnalyzer {
7463
7554
  stopwords_path?: string;
7464
7555
  stem_exclusion?: string[];
7465
7556
  }
7557
+ export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase {
7558
+ type: 'ja_stop';
7559
+ stopwords?: AnalysisStopWords;
7560
+ }
7466
7561
  export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
7467
7562
  type: 'kstem';
7468
7563
  }
7469
7564
  export type AnalysisKeepTypesMode = 'include' | 'exclude';
7470
7565
  export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
7471
7566
  type: 'keep_types';
7567
+ /** Indicates whether to keep or remove the specified token types. */
7472
7568
  mode?: AnalysisKeepTypesMode;
7473
- types?: string[];
7569
+ /** List of token types to keep or remove. */
7570
+ types: string[];
7474
7571
  }
7475
7572
  export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
7476
7573
  type: 'keep';
7574
+ /** List of words to keep. Only tokens that match words in this list are included in the output.
7575
+ * Either this parameter or `keep_words_path` must be specified. */
7477
7576
  keep_words?: string[];
7577
+ /** If `true`, lowercase all keep words. Defaults to `false`. */
7478
7578
  keep_words_case?: boolean;
7579
+ /** Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output.
7580
+ * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.
7581
+ * Either this parameter or `keep_words` must be specified. */
7479
7582
  keep_words_path?: string;
7480
7583
  }
7481
7584
  export interface AnalysisKeywordAnalyzer {
@@ -7484,11 +7587,22 @@ export interface AnalysisKeywordAnalyzer {
7484
7587
  }
7485
7588
  export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
7486
7589
  type: 'keyword_marker';
7590
+ /** If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`. */
7487
7591
  ignore_case?: boolean;
7592
+ /** Array of keywords. Tokens that match these keywords are not stemmed.
7593
+ * This parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */
7488
7594
  keywords?: string | string[];
7595
+ /** Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed.
7596
+ * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.
7597
+ * This parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */
7489
7598
  keywords_path?: string;
7599
+ /** Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.
7600
+ * This parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_pattern`. */
7490
7601
  keywords_pattern?: string;
7491
7602
  }
7603
+ export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase {
7604
+ type: 'keyword_repeat';
7605
+ }
7492
7606
  export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
7493
7607
  type: 'keyword';
7494
7608
  buffer_size?: integer;
@@ -7534,7 +7648,9 @@ export interface AnalysisLatvianAnalyzer {
7534
7648
  }
7535
7649
  export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
7536
7650
  type: 'length';
7651
+ /** Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. */
7537
7652
  max?: integer;
7653
+ /** Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`. */
7538
7654
  min?: integer;
7539
7655
  }
7540
7656
  export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
@@ -7542,7 +7658,9 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
7542
7658
  }
7543
7659
  export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
7544
7660
  type: 'limit';
7661
+ /** If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`. */
7545
7662
  consume_all_tokens?: boolean;
7663
+ /** Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. Defaults to `1`. */
7546
7664
  max_token_count?: SpecUtilsStringified<integer>;
7547
7665
  }
7548
7666
  export interface AnalysisLithuanianAnalyzer {
@@ -7556,8 +7674,10 @@ export interface AnalysisLowercaseNormalizer {
7556
7674
  }
7557
7675
  export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
7558
7676
  type: 'lowercase';
7559
- language?: string;
7677
+ /** Language-specific lowercase token filter to use. */
7678
+ language?: AnalysisLowercaseTokenFilterLanguages;
7560
7679
  }
7680
+ export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish';
7561
7681
  export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
7562
7682
  type: 'lowercase';
7563
7683
  }
@@ -7566,15 +7686,32 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
7566
7686
  mappings?: string[];
7567
7687
  mappings_path?: string;
7568
7688
  }
7689
+ export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase {
7690
+ type: 'min_hash';
7691
+ /** Number of buckets to which hashes are assigned. Defaults to `512`. */
7692
+ bucket_count?: integer;
7693
+ /** Number of ways to hash each token in the stream. Defaults to `1`. */
7694
+ hash_count?: integer;
7695
+ /** Number of hashes to keep from each bucket. Defaults to `1`.
7696
+ * Hashes are retained by ascending size, starting with the bucket’s smallest hash first. */
7697
+ hash_set_size?: integer;
7698
+ /** If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`. */
7699
+ with_rotation?: boolean;
7700
+ }
7569
7701
  export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
7570
7702
  type: 'multiplexer';
7703
+ /** A list of token filters to apply to incoming tokens. */
7571
7704
  filters: string[];
7705
+ /** If `true` (the default) then emit the original token in addition to the filtered tokens. */
7572
7706
  preserve_original?: SpecUtilsStringified<boolean>;
7573
7707
  }
7574
7708
  export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
7575
7709
  type: 'ngram';
7710
+ /** Maximum length of characters in a gram. Defaults to `2`. */
7576
7711
  max_gram?: integer;
7712
+ /** Minimum length of characters in a gram. Defaults to `1`. */
7577
7713
  min_gram?: integer;
7714
+ /** Emits original token when set to `true`. Defaults to `false`. */
7578
7715
  preserve_original?: SpecUtilsStringified<boolean>;
7579
7716
  }
7580
7717
  export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
@@ -7594,6 +7731,7 @@ export interface AnalysisNoriAnalyzer {
7594
7731
  export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed';
7595
7732
  export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
7596
7733
  type: 'nori_part_of_speech';
7734
+ /** An array of part-of-speech tags that should be removed. */
7597
7735
  stoptags?: string[];
7598
7736
  }
7599
7737
  export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase {
@@ -7637,7 +7775,9 @@ export interface AnalysisPatternAnalyzer {
7637
7775
  }
7638
7776
  export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase {
7639
7777
  type: 'pattern_capture';
7778
+ /** A list of regular expressions to match. */
7640
7779
  patterns: string[];
7780
+ /** If set to `true` (the default) it will emit the original token. */
7641
7781
  preserve_original?: SpecUtilsStringified<boolean>;
7642
7782
  }
7643
7783
  export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase {
@@ -7648,9 +7788,11 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase
7648
7788
  }
7649
7789
  export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase {
7650
7790
  type: 'pattern_replace';
7791
+ /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */
7651
7792
  all?: boolean;
7652
- flags?: string;
7793
+ /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */
7653
7794
  pattern: string;
7795
+ /** Replacement substring. Defaults to an empty substring (`""`). */
7654
7796
  replacement?: string;
7655
7797
  }
7656
7798
  export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
@@ -7664,6 +7806,9 @@ export interface AnalysisPersianAnalyzer {
7664
7806
  stopwords?: AnalysisStopWords;
7665
7807
  stopwords_path?: string;
7666
7808
  }
7809
+ export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase {
7810
+ type: 'persian_normalization';
7811
+ }
7667
7812
  export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff';
7668
7813
  export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish';
7669
7814
  export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic';
@@ -7688,6 +7833,7 @@ export interface AnalysisPortugueseAnalyzer {
7688
7833
  }
7689
7834
  export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
7690
7835
  type: 'predicate_token_filter';
7836
+ /** Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output. */
7691
7837
  script: Script | ScriptSource;
7692
7838
  }
7693
7839
  export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase {
@@ -7708,19 +7854,34 @@ export interface AnalysisRussianAnalyzer {
7708
7854
  stopwords_path?: string;
7709
7855
  stem_exclusion?: string[];
7710
7856
  }
7857
+ export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase {
7858
+ type: 'scandinavian_folding';
7859
+ }
7860
+ export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase {
7861
+ type: 'scandinavian_normalization';
7862
+ }
7711
7863
  export interface AnalysisSerbianAnalyzer {
7712
7864
  type: 'serbian';
7713
7865
  stopwords?: AnalysisStopWords;
7714
7866
  stopwords_path?: string;
7715
7867
  stem_exclusion?: string[];
7716
7868
  }
7869
+ export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase {
7870
+ type: 'serbian_normalization';
7871
+ }
7717
7872
  export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
7718
7873
  type: 'shingle';
7874
+ /** String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`). */
7719
7875
  filler_token?: string;
7720
- max_shingle_size?: integer | string;
7721
- min_shingle_size?: integer | string;
7876
+ /** Maximum number of tokens to concatenate when creating shingles. Defaults to `2`. */
7877
+ max_shingle_size?: SpecUtilsStringified<integer>;
7878
+ /** Minimum number of tokens to concatenate when creating shingles. Defaults to `2`. */
7879
+ min_shingle_size?: SpecUtilsStringified<integer>;
7880
+ /** If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`. */
7722
7881
  output_unigrams?: boolean;
7882
+ /** If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`. */
7723
7883
  output_unigrams_if_no_shingles?: boolean;
7884
+ /** Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`" "`). */
7724
7885
  token_separator?: string;
7725
7886
  }
7726
7887
  export interface AnalysisSimpleAnalyzer {
@@ -7741,9 +7902,10 @@ export interface AnalysisSnowballAnalyzer {
7741
7902
  language: AnalysisSnowballLanguage;
7742
7903
  stopwords?: AnalysisStopWords;
7743
7904
  }
7744
- export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish';
7905
+ export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish';
7745
7906
  export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
7746
7907
  type: 'snowball';
7908
+ /** Controls the language used by the stemmer. */
7747
7909
  language?: AnalysisSnowballLanguage;
7748
7910
  }
7749
7911
  export interface AnalysisSoraniAnalyzer {
@@ -7752,6 +7914,9 @@ export interface AnalysisSoraniAnalyzer {
7752
7914
  stopwords_path?: string;
7753
7915
  stem_exclusion?: string[];
7754
7916
  }
7917
+ export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase {
7918
+ type: 'sorani_normalization';
7919
+ }
7755
7920
  export interface AnalysisSpanishAnalyzer {
7756
7921
  type: 'spanish';
7757
7922
  stopwords?: AnalysisStopWords;
@@ -7775,7 +7940,9 @@ export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase {
7775
7940
  }
7776
7941
  export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase {
7777
7942
  type: 'stemmer_override';
7943
+ /** A list of mapping rules to use. */
7778
7944
  rules?: string[];
7945
+ /** A path (either relative to `config` location, or absolute) to a list of mappings. */
7779
7946
  rules_path?: string;
7780
7947
  }
7781
7948
  export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase {
@@ -7795,12 +7962,18 @@ export interface AnalysisStopAnalyzer {
7795
7962
  }
7796
7963
  export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {
7797
7964
  type: 'stop';
7965
+ /** If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. Defaults to `false`. */
7798
7966
  ignore_case?: boolean;
7967
+ /** If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`. */
7799
7968
  remove_trailing?: boolean;
7969
+ /** Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. */
7800
7970
  stopwords?: AnalysisStopWords;
7971
+ /** Path to a file that contains a list of stop words to remove.
7972
+ * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break. */
7801
7973
  stopwords_path?: string;
7802
7974
  }
7803
- export type AnalysisStopWords = string | string[];
7975
+ export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_';
7976
+ export type AnalysisStopWords = AnalysisStopWordLanguage | string[];
7804
7977
  export interface AnalysisSwedishAnalyzer {
7805
7978
  type: 'swedish';
7806
7979
  stopwords?: AnalysisStopWords;
@@ -7808,26 +7981,28 @@ export interface AnalysisSwedishAnalyzer {
7808
7981
  stem_exclusion?: string[];
7809
7982
  }
7810
7983
  export type AnalysisSynonymFormat = 'solr' | 'wordnet';
7811
- export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
7984
+ export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase {
7812
7985
  type: 'synonym_graph';
7813
- expand?: boolean;
7814
- format?: AnalysisSynonymFormat;
7815
- lenient?: boolean;
7816
- synonyms?: string[];
7817
- synonyms_path?: string;
7818
- synonyms_set?: string;
7819
- tokenizer?: string;
7820
- updateable?: boolean;
7821
7986
  }
7822
- export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
7987
+ export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase {
7823
7988
  type: 'synonym';
7989
+ }
7990
+ export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase {
7991
+ /** Expands definitions for equivalent synonym rules. Defaults to `true`. */
7824
7992
  expand?: boolean;
7993
+ /** Sets the synonym rules format. */
7825
7994
  format?: AnalysisSynonymFormat;
7995
+ /** If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. Defaults to the value of the `updateable` setting. */
7826
7996
  lenient?: boolean;
7997
+ /** Used to define inline synonyms. */
7827
7998
  synonyms?: string[];
7999
+ /** Used to provide a synonym file. This path must be absolute or relative to the `config` location. */
7828
8000
  synonyms_path?: string;
8001
+ /** Provide a synonym set created via Synonyms Management APIs. */
7829
8002
  synonyms_set?: string;
8003
+ /** Controls the tokenizers that will be used to tokenize the synonym; this parameter is for backwards compatibility for indices that were created before 6.0. */
7830
8004
  tokenizer?: string;
8005
+ /** If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`. */
7831
8006
  updateable?: boolean;
7832
8007
  }
7833
8008
  export interface AnalysisThaiAnalyzer {
@@ -7843,7 +8018,7 @@ export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition;
7843
8018
  export interface AnalysisTokenFilterBase {
7844
8019
  version?: VersionString;
7845
8020
  }
7846
- export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter;
8021
+ export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | 
AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter;
7847
8022
  export type AnalysisTokenizer = string | AnalysisTokenizerDefinition;
7848
8023
  export interface AnalysisTokenizerBase {
7849
8024
  version?: VersionString;
@@ -7854,6 +8029,7 @@ export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase {
7854
8029
  }
7855
8030
  export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
7856
8031
  type: 'truncate';
8032
+ /** Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`. */
7857
8033
  length?: integer;
7858
8034
  }
7859
8035
  export interface AnalysisTurkishAnalyzer {
@@ -7868,6 +8044,7 @@ export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
7868
8044
  }
7869
8045
  export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase {
7870
8046
  type: 'unique';
8047
+ /** If `true`, only remove duplicate tokens in the same position. Defaults to `false`. */
7871
8048
  only_on_same_position?: boolean;
7872
8049
  }
7873
8050
  export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase {
@@ -7881,38 +8058,43 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase {
7881
8058
  type: 'whitespace';
7882
8059
  max_token_length?: integer;
7883
8060
  }
7884
- export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase {
8061
+ export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
7885
8062
  type: 'word_delimiter_graph';
8063
+ /** If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`. */
7886
8064
  adjust_offsets?: boolean;
7887
- catenate_all?: boolean;
7888
- catenate_numbers?: boolean;
7889
- catenate_words?: boolean;
7890
- generate_number_parts?: boolean;
7891
- generate_word_parts?: boolean;
8065
+ /** If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`. */
7892
8066
  ignore_keywords?: boolean;
7893
- preserve_original?: SpecUtilsStringified<boolean>;
7894
- protected_words?: string[];
7895
- protected_words_path?: string;
7896
- split_on_case_change?: boolean;
7897
- split_on_numerics?: boolean;
7898
- stem_english_possessive?: boolean;
7899
- type_table?: string[];
7900
- type_table_path?: string;
7901
8067
  }
7902
- export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase {
8068
+ export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase {
7903
8069
  type: 'word_delimiter';
8070
+ }
8071
+ export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase {
8072
+ /** If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`. */
7904
8073
  catenate_all?: boolean;
8074
+ /** If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`. */
7905
8075
  catenate_numbers?: boolean;
8076
+ /** If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`. */
7906
8077
  catenate_words?: boolean;
8078
+ /** If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */
7907
8079
  generate_number_parts?: boolean;
8080
+ /** If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */
7908
8081
  generate_word_parts?: boolean;
8082
+ /** If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`. */
7909
8083
  preserve_original?: SpecUtilsStringified<boolean>;
8084
+ /** Array of tokens the filter won’t split. */
7910
8085
  protected_words?: string[];
8086
+ /** Path to a file that contains a list of tokens the filter won’t split.
8087
+ * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. */
7911
8088
  protected_words_path?: string;
8089
+ /** If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`. */
7912
8090
  split_on_case_change?: boolean;
8091
+ /** If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`. */
7913
8092
  split_on_numerics?: boolean;
8093
+ /** If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. */
7914
8094
  stem_english_possessive?: boolean;
8095
+ /** Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */
7915
8096
  type_table?: string[];
8097
+ /** Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */
7916
8098
  type_table_path?: string;
7917
8099
  }
7918
8100
  export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase {
@@ -10208,6 +10390,8 @@ export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buc
10208
10390
  export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[];
10209
10391
  export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v';
10210
10392
  export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[];
10393
+ export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 
'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 
'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string;
10394
+ export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[];
10211
10395
  export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
10212
10396
  }
10213
10397
  export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v';
@@ -13439,15 +13623,16 @@ export interface CatNodesRequest extends CatCatRequestBase {
13439
13623
  full_id?: boolean | string;
13440
13624
  /** If true, the response includes information from segments that are not loaded into memory. */
13441
13625
  include_unloaded_segments?: boolean;
13442
- /** List of columns to appear in the response. Supports simple wildcards. */
13443
- h?: Names;
13444
- /** List of columns that determine how the table should be sorted.
13626
+ /** A comma-separated list of columns names to display.
13627
+ * It supports simple wildcards. */
13628
+ h?: CatCatNodeColumns;
13629
+ /** A comma-separated list of column names or aliases that determines the sort order.
13445
13630
  * Sorting defaults to ascending and can be changed by setting `:asc`
13446
13631
  * or `:desc` as a suffix to the column name. */
13447
13632
  s?: Names;
13448
- /** Period to wait for a connection to the master node. */
13633
+ /** The period to wait for a connection to the master node. */
13449
13634
  master_timeout?: Duration;
13450
- /** Unit used to display time values. */
13635
+ /** The unit used to display time values. */
13451
13636
  time?: TimeUnit;
13452
13637
  /** All values in `body` will be added to the request body. */
13453
13638
  body?: string | ({
@@ -16862,6 +17047,58 @@ export interface ClusterStateRequest extends RequestBase {
16862
17047
  };
16863
17048
  }
16864
17049
  export type ClusterStateResponse = any;
17050
+ export interface ClusterStatsCCSStats {
17051
+ /** Contains remote cluster settings and metrics collected from them.
17052
+ * The keys are cluster names, and the values are per-cluster data.
17053
+ * Only present if `include_remotes` option is set to true. */
17054
+ clusters?: Record<string, ClusterStatsRemoteClusterInfo>;
17055
+ /** Information about cross-cluster search usage. */
17056
+ _search: ClusterStatsCCSUsageStats;
17057
+ /** Information about ES|QL cross-cluster query usage. */
17058
+ _esql?: ClusterStatsCCSUsageStats;
17059
+ }
17060
+ export interface ClusterStatsCCSUsageClusterStats {
17061
+ /** The total number of successful (not skipped) cross-cluster search requests that were executed against this cluster. This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. */
17062
+ total: integer;
17063
+ /** The total number of cross-cluster search requests for which this cluster was skipped. */
17064
+ skipped: integer;
17065
+ /** Statistics about the time taken to execute requests against this cluster. */
17066
+ took: ClusterStatsCCSUsageTimeValue;
17067
+ }
17068
+ export interface ClusterStatsCCSUsageStats {
17069
+ /** The total number of cross-cluster search requests that have been executed by the cluster. */
17070
+ total: integer;
17071
+ /** The total number of cross-cluster search requests that have been successfully executed by the cluster. */
17072
+ success: integer;
17073
+ /** The total number of cross-cluster search requests (successful or failed) that had at least one remote cluster skipped. */
17074
+ skipped: integer;
17075
+ /** Statistics about the time taken to execute cross-cluster search requests. */
17076
+ took: ClusterStatsCCSUsageTimeValue;
17077
+ /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `true`. */
17078
+ took_mrt_true?: ClusterStatsCCSUsageTimeValue;
17079
+ /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `false`. */
17080
+ took_mrt_false?: ClusterStatsCCSUsageTimeValue;
17081
+ /** The maximum number of remote clusters that were queried in a single cross-cluster search request. */
17082
+ remotes_per_search_max: integer;
17083
+ /** The average number of remote clusters that were queried in a single cross-cluster search request. */
17084
+ remotes_per_search_avg: double;
17085
+ /** Statistics about the reasons for cross-cluster search request failures. The keys are the failure reason names and the values are the number of requests that failed for that reason. */
17086
+ failure_reasons: Record<string, integer>;
17087
+ /** The keys are the names of the search feature, and the values are the number of requests that used that feature. Single request can use more than one feature (e.g. both `async` and `wildcard`). */
17088
+ features: Record<string, integer>;
17089
+ /** Statistics about the clients that executed cross-cluster search requests. The keys are the names of the clients, and the values are the number of requests that were executed by that client. Only known clients (such as `kibana` or `elasticsearch`) are counted. */
17090
+ clients: Record<string, integer>;
17091
+ /** Statistics about the clusters that were queried in cross-cluster search requests. The keys are cluster names, and the values are per-cluster telemetry data. This also includes the local cluster itself, which uses the name `(local)`. */
17092
+ clusters: Record<string, ClusterStatsCCSUsageClusterStats>;
17093
+ }
17094
+ export interface ClusterStatsCCSUsageTimeValue {
17095
+ /** The maximum time taken to execute a request, in milliseconds. */
17096
+ max: DurationValue<UnitMillis>;
17097
+ /** The average time taken to execute a request, in milliseconds. */
17098
+ avg: DurationValue<UnitMillis>;
17099
+ /** The 90th percentile of the time taken to execute requests, in milliseconds. */
17100
+ p90: DurationValue<UnitMillis>;
17101
+ }
16865
17102
  export interface ClusterStatsCharFilterTypes {
16866
17103
  /** Contains statistics about analyzer types used in selected nodes. */
16867
17104
  analyzer_types: ClusterStatsFieldTypes[];
@@ -17163,6 +17400,38 @@ export interface ClusterStatsOperatingSystemMemoryInfo {
17163
17400
  /** Percentage of physical memory in use across all selected nodes. */
17164
17401
  used_percent: integer;
17165
17402
  }
17403
+ export interface ClusterStatsRemoteClusterInfo {
17404
+ /** The UUID of the remote cluster. */
17405
+ cluster_uuid: string;
17406
+ /** The connection mode used to communicate with the remote cluster. */
17407
+ mode: string;
17408
+ /** The `skip_unavailable` setting used for this remote cluster. */
17409
+ skip_unavailable: boolean;
17410
+ /** Transport compression setting used for this remote cluster. */
17411
+ transport_compress: string;
17412
+ /** Health status of the cluster, based on the state of its primary and replica shards. */
17413
+ status: HealthStatus;
17414
+ /** The list of Elasticsearch versions used by the nodes on the remote cluster. */
17415
+ version: VersionString[];
17416
+ /** The total count of nodes in the remote cluster. */
17417
+ nodes_count: integer;
17418
+ /** The total number of shards in the remote cluster. */
17419
+ shards_count: integer;
17420
+ /** The total number of indices in the remote cluster. */
17421
+ indices_count: integer;
17422
+ /** Total data set size, in bytes, of all shards assigned to selected nodes. */
17423
+ indices_total_size_in_bytes: long;
17424
+ /** Total data set size of all shards assigned to selected nodes, as a human-readable string. */
17425
+ indices_total_size?: string;
17426
+ /** Maximum amount of memory, in bytes, available for use by the heap across the nodes of the remote cluster. */
17427
+ max_heap_in_bytes: long;
17428
+ /** Maximum amount of memory available for use by the heap across the nodes of the remote cluster, as a human-readable string. */
17429
+ max_heap?: string;
17430
+ /** Total amount, in bytes, of physical memory across the nodes of the remote cluster. */
17431
+ mem_total_in_bytes: long;
17432
+ /** Total amount of physical memory across the nodes of the remote cluster, as a human-readable string. */
17433
+ mem_total?: string;
17434
+ }
17166
17435
  export interface ClusterStatsRequest extends RequestBase {
17167
17436
  /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */
17168
17437
  node_id?: NodeIds;
@@ -17233,6 +17502,8 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
17233
17502
  status: HealthStatus;
17234
17503
  /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */
17235
17504
  timestamp: long;
17505
+ /** Cross-cluster stats */
17506
+ ccs: ClusterStatsCCSStats;
17236
17507
  }
17237
17508
  export interface ConnectorConnector {
17238
17509
  api_key_id?: string;
@@ -19628,7 +19899,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged {
19628
19899
  lifecycle_date?: DateTime;
19629
19900
  lifecycle_date_millis?: EpochTime<UnitMillis>;
19630
19901
  managed: true;
19631
- phase: Name;
19902
+ phase?: Name;
19632
19903
  phase_time?: DateTime;
19633
19904
  phase_time_millis?: EpochTime<UnitMillis>;
19634
19905
  policy?: Name;
@@ -20840,7 +21111,16 @@ export interface IndicesCloseResponse {
20840
21111
  shards_acknowledged: boolean;
20841
21112
  }
20842
21113
  export interface IndicesCreateRequest extends RequestBase {
20843
- /** Name of the index you wish to create. */
21114
+ /** Name of the index you wish to create.
21115
+ * Index names must meet the following criteria:
21116
+ *
21117
+ * * Lowercase only
21118
+ * * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
21119
+ * * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
21120
+ * * Cannot start with `-`, `_`, or `+`
21121
+ * * Cannot be `.` or `..`
21122
+ * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)
21123
+ * * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins */
20844
21124
  index: IndexName;
20845
21125
  /** Period to wait for a connection to the master node.
20846
21126
  * If no response is received before the timeout expires, the request fails and returns an error. */
@@ -24447,7 +24727,9 @@ export interface InferenceRateLimitSetting {
24447
24727
  requests_per_minute?: integer;
24448
24728
  }
24449
24729
  export interface InferenceRequestChatCompletion {
24450
- /** A list of objects representing the conversation. */
24730
+ /** A list of objects representing the conversation.
24731
+ * Requests should generally only add new messages from the user (role `user`).
24732
+ * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */
24451
24733
  messages: InferenceMessage[];
24452
24734
  /** The ID of the model to use. */
24453
24735
  model?: string;
@@ -26011,15 +26293,16 @@ export interface IngestPipelineProcessor extends IngestProcessorBase {
26011
26293
  /** Whether to ignore missing pipelines instead of failing. */
26012
26294
  ignore_missing_pipeline?: boolean;
26013
26295
  }
26014
- export interface IngestPipelineSimulation {
26296
+ export interface IngestPipelineProcessorResult {
26015
26297
  doc?: IngestDocumentSimulation;
26016
26298
  tag?: string;
26017
26299
  processor_type?: string;
26018
- status?: WatcherActionStatusOptions;
26300
+ status?: IngestPipelineSimulationStatusOptions;
26019
26301
  description?: string;
26020
26302
  ignored_error?: ErrorCause;
26021
26303
  error?: ErrorCause;
26022
26304
  }
26305
+ export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped';
26023
26306
  export interface IngestProcessorBase {
26024
26307
  /** Description of the processor.
26025
26308
  * Useful for describing the purpose of the processor or its configuration. */
@@ -26286,7 +26569,7 @@ export type IngestShapeType = 'geo_shape' | 'shape';
26286
26569
  export interface IngestSimulateDocumentResult {
26287
26570
  doc?: IngestDocumentSimulation;
26288
26571
  error?: ErrorCause;
26289
- processor_results?: IngestPipelineSimulation[];
26572
+ processor_results?: IngestPipelineProcessorResult[];
26290
26573
  }
26291
26574
  export interface IngestSortProcessor extends IngestProcessorBase {
26292
26575
  /** The field to be sorted. */
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@elastic/elasticsearch",
3
- "version": "9.0.1",
4
- "versionCanary": "9.0.1-canary.0",
3
+ "version": "9.0.2",
4
+ "versionCanary": "9.0.2-canary.0",
5
5
  "description": "The official Elasticsearch client for Node.js",
6
6
  "main": "./index.js",
7
7
  "types": "index.d.ts",