@elastic/elasticsearch 9.0.4 → 9.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/catalog-info.yaml +2 -15
  2. package/lib/api/api/async_search.d.ts +4 -4
  3. package/lib/api/api/autoscaling.d.ts +4 -4
  4. package/lib/api/api/bulk.d.ts +1 -1
  5. package/lib/api/api/cat.d.ts +26 -26
  6. package/lib/api/api/ccr.d.ts +13 -13
  7. package/lib/api/api/clear_scroll.d.ts +1 -1
  8. package/lib/api/api/close_point_in_time.d.ts +1 -1
  9. package/lib/api/api/cluster.d.ts +16 -16
  10. package/lib/api/api/connector.d.ts +30 -30
  11. package/lib/api/api/count.d.ts +1 -1
  12. package/lib/api/api/create.d.ts +1 -1
  13. package/lib/api/api/dangling_indices.d.ts +3 -3
  14. package/lib/api/api/delete.d.ts +1 -1
  15. package/lib/api/api/delete_by_query.d.ts +1 -1
  16. package/lib/api/api/delete_by_query_rethrottle.d.ts +1 -1
  17. package/lib/api/api/delete_script.d.ts +1 -1
  18. package/lib/api/api/enrich.d.ts +5 -5
  19. package/lib/api/api/eql.d.ts +4 -4
  20. package/lib/api/api/esql.d.ts +16 -4
  21. package/lib/api/api/esql.js +79 -1
  22. package/lib/api/api/esql.js.map +1 -1
  23. package/lib/api/api/exists.d.ts +1 -1
  24. package/lib/api/api/exists_source.d.ts +1 -1
  25. package/lib/api/api/explain.d.ts +1 -1
  26. package/lib/api/api/features.d.ts +2 -2
  27. package/lib/api/api/field_caps.d.ts +1 -1
  28. package/lib/api/api/fleet.d.ts +3 -3
  29. package/lib/api/api/get.d.ts +1 -1
  30. package/lib/api/api/get_script.d.ts +1 -1
  31. package/lib/api/api/get_script_context.d.ts +1 -1
  32. package/lib/api/api/get_script_languages.d.ts +1 -1
  33. package/lib/api/api/get_source.d.ts +1 -1
  34. package/lib/api/api/graph.d.ts +1 -1
  35. package/lib/api/api/health_report.d.ts +1 -1
  36. package/lib/api/api/ilm.d.ts +11 -11
  37. package/lib/api/api/index.d.ts +1 -1
  38. package/lib/api/api/indices.d.ts +105 -63
  39. package/lib/api/api/indices.js +285 -2
  40. package/lib/api/api/indices.js.map +1 -1
  41. package/lib/api/api/inference.d.ts +49 -35
  42. package/lib/api/api/inference.js +146 -6
  43. package/lib/api/api/inference.js.map +1 -1
  44. package/lib/api/api/info.d.ts +1 -1
  45. package/lib/api/api/ingest.d.ts +9 -9
  46. package/lib/api/api/knn_search.d.ts +1 -1
  47. package/lib/api/api/license.d.ts +7 -7
  48. package/lib/api/api/logstash.d.ts +3 -3
  49. package/lib/api/api/mget.d.ts +1 -1
  50. package/lib/api/api/migration.d.ts +3 -3
  51. package/lib/api/api/ml.d.ts +72 -72
  52. package/lib/api/api/msearch.d.ts +1 -1
  53. package/lib/api/api/msearch_template.d.ts +1 -1
  54. package/lib/api/api/mtermvectors.d.ts +1 -1
  55. package/lib/api/api/nodes.d.ts +7 -7
  56. package/lib/api/api/open_point_in_time.d.ts +1 -1
  57. package/lib/api/api/ping.d.ts +1 -1
  58. package/lib/api/api/profiling.d.ts +4 -4
  59. package/lib/api/api/put_script.d.ts +1 -1
  60. package/lib/api/api/query_rules.d.ts +8 -8
  61. package/lib/api/api/rank_eval.d.ts +1 -1
  62. package/lib/api/api/reindex.d.ts +1 -1
  63. package/lib/api/api/reindex_rethrottle.d.ts +1 -1
  64. package/lib/api/api/render_search_template.d.ts +1 -1
  65. package/lib/api/api/rollup.d.ts +8 -8
  66. package/lib/api/api/scroll.d.ts +1 -1
  67. package/lib/api/api/search.d.ts +1 -1
  68. package/lib/api/api/search_application.d.ts +10 -10
  69. package/lib/api/api/search_mvt.d.ts +1 -1
  70. package/lib/api/api/search_shards.d.ts +1 -1
  71. package/lib/api/api/search_template.d.ts +1 -1
  72. package/lib/api/api/searchable_snapshots.d.ts +4 -4
  73. package/lib/api/api/security.d.ts +64 -64
  74. package/lib/api/api/shutdown.d.ts +3 -3
  75. package/lib/api/api/simulate.d.ts +1 -1
  76. package/lib/api/api/slm.d.ts +9 -9
  77. package/lib/api/api/snapshot.d.ts +14 -14
  78. package/lib/api/api/snapshot.js +1 -0
  79. package/lib/api/api/snapshot.js.map +1 -1
  80. package/lib/api/api/sql.d.ts +6 -6
  81. package/lib/api/api/ssl.d.ts +1 -1
  82. package/lib/api/api/streams.d.ts +41 -0
  83. package/lib/api/api/streams.js +132 -0
  84. package/lib/api/api/streams.js.map +1 -0
  85. package/lib/api/api/synonyms.d.ts +7 -7
  86. package/lib/api/api/synonyms.js +9 -3
  87. package/lib/api/api/synonyms.js.map +1 -1
  88. package/lib/api/api/tasks.d.ts +3 -3
  89. package/lib/api/api/terms_enum.d.ts +1 -1
  90. package/lib/api/api/termvectors.d.ts +1 -1
  91. package/lib/api/api/text_structure.d.ts +4 -4
  92. package/lib/api/api/transform.d.ts +12 -12
  93. package/lib/api/api/update.d.ts +1 -1
  94. package/lib/api/api/update_by_query.d.ts +1 -1
  95. package/lib/api/api/update_by_query_rethrottle.d.ts +1 -1
  96. package/lib/api/api/watcher.d.ts +13 -13
  97. package/lib/api/api/xpack.d.ts +2 -2
  98. package/lib/api/index.d.ts +4 -0
  99. package/lib/api/index.js +14 -2
  100. package/lib/api/index.js.map +1 -1
  101. package/lib/api/types.d.ts +977 -59
  102. package/lib/client.d.ts +1 -1
  103. package/package.json +13 -13
  104. package/renovate.json +4 -2
@@ -3190,7 +3190,7 @@ export interface SearchFieldSuggester {
3190
3190
  }
3191
3191
  export interface SearchHighlight extends SearchHighlightBase {
3192
3192
  encoder?: SearchHighlighterEncoder;
3193
- fields: Record<Field, SearchHighlightField>;
3193
+ fields: Partial<Record<Field, SearchHighlightField>> | Partial<Record<Field, SearchHighlightField>>[];
3194
3194
  }
3195
3195
  export interface SearchHighlightBase {
3196
3196
  type?: SearchHighlighterType;
@@ -3632,11 +3632,20 @@ export interface SearchSmoothingModelContainer {
3632
3632
  export type SearchSourceConfig = boolean | SearchSourceFilter | Fields;
3633
3633
  export type SearchSourceConfigParam = boolean | Fields;
3634
3634
  export interface SearchSourceFilter {
3635
+ /** If `true`, vector fields are excluded from the returned source.
3636
+ *
3637
+ * This option takes precedence over `includes`: any vector field will
3638
+ * remain excluded even if it matches an `includes` rule. */
3639
+ exclude_vectors?: boolean;
3640
+ /** A list of fields to exclude from the returned source. */
3635
3641
  excludes?: Fields;
3636
- /** @alias excludes */
3642
+ /** A list of fields to exclude from the returned source.
3643
+ * @alias excludes */
3637
3644
  exclude?: Fields;
3645
+ /** A list of fields to include in the returned source. */
3638
3646
  includes?: Fields;
3639
- /** @alias includes */
3647
+ /** A list of fields to include in the returned source.
3648
+ * @alias includes */
3640
3649
  include?: Fields;
3641
3650
  }
3642
3651
  export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram';
@@ -4883,6 +4892,8 @@ export interface IndexingStats {
4883
4892
  index_failed: long;
4884
4893
  types?: Record<string, IndexingStats>;
4885
4894
  write_load?: double;
4895
+ recent_write_load?: double;
4896
+ peak_write_load?: double;
4886
4897
  }
4887
4898
  export type Indices = IndexName | IndexName[];
4888
4899
  export interface IndicesOptions {
@@ -4934,8 +4945,7 @@ export interface KnnQuery extends QueryDslQueryBase {
4934
4945
  filter?: QueryDslQueryContainer | QueryDslQueryContainer[];
4935
4946
  /** The minimum similarity for a vector to be considered a match */
4936
4947
  similarity?: float;
4937
- /** Apply oversampling and rescoring to quantized vectors *
4938
- * @experimental */
4948
+ /** Apply oversampling and rescoring to quantized vectors */
4939
4949
  rescore_vector?: RescoreVector;
4940
4950
  }
4941
4951
  export interface KnnRetriever extends RetrieverBase {
@@ -4951,8 +4961,7 @@ export interface KnnRetriever extends RetrieverBase {
4951
4961
  num_candidates: integer;
4952
4962
  /** The minimum similarity required for a document to be considered a match. */
4953
4963
  similarity?: float;
4954
- /** Apply oversampling and rescoring to quantized vectors *
4955
- * @experimental */
4964
+ /** Apply oversampling and rescoring to quantized vectors */
4956
4965
  rescore_vector?: RescoreVector;
4957
4966
  }
4958
4967
  export interface KnnSearch {
@@ -4974,8 +4983,7 @@ export interface KnnSearch {
4974
4983
  similarity?: float;
4975
4984
  /** If defined, each search hit will contain inner hits. */
4976
4985
  inner_hits?: SearchInnerHits;
4977
- /** Apply oversampling and rescoring to quantized vectors *
4978
- * @experimental */
4986
+ /** Apply oversampling and rescoring to quantized vectors */
4979
4987
  rescore_vector?: RescoreVector;
4980
4988
  }
4981
4989
  export interface LatLonGeoLocation {
@@ -4990,6 +4998,9 @@ export interface LinearRetriever extends RetrieverBase {
4990
4998
  /** Inner retrievers. */
4991
4999
  retrievers?: InnerRetriever[];
4992
5000
  rank_window_size?: integer;
5001
+ query?: string;
5002
+ fields?: string[];
5003
+ normalizer?: ScoreNormalizer;
4993
5004
  }
4994
5005
  export type MapboxVectorTiles = ArrayBuffer;
4995
5006
  export interface MergesStats {
@@ -5115,6 +5126,8 @@ export interface RRFRetriever extends RetrieverBase {
5115
5126
  rank_constant?: integer;
5116
5127
  /** This value determines the size of the individual result sets per query. */
5117
5128
  rank_window_size?: integer;
5129
+ query?: string;
5130
+ fields?: string[];
5118
5131
  }
5119
5132
  export interface RankBase {
5120
5133
  }
@@ -5211,7 +5224,7 @@ export interface RuleRetriever extends RetrieverBase {
5211
5224
  rank_window_size?: integer;
5212
5225
  }
5213
5226
  export type ScalarValue = long | double | string | boolean | null;
5214
- export type ScoreNormalizer = 'none' | 'minmax';
5227
+ export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm';
5215
5228
  export interface ScoreSort {
5216
5229
  order?: SortOrder;
5217
5230
  }
@@ -5267,6 +5280,7 @@ export interface SearchStats {
5267
5280
  suggest_time?: Duration;
5268
5281
  suggest_time_in_millis: DurationValue<UnitMillis>;
5269
5282
  suggest_total: long;
5283
+ recent_search_load?: double;
5270
5284
  groups?: Record<string, SearchStats>;
5271
5285
  }
5272
5286
  export interface SearchTransform {
@@ -5417,7 +5431,7 @@ export interface TaskFailure {
5417
5431
  status: string;
5418
5432
  reason: ErrorCause;
5419
5433
  }
5420
- export type TaskId = string | integer;
5434
+ export type TaskId = string;
5421
5435
  export interface TextEmbedding {
5422
5436
  model_id: string;
5423
5437
  model_text: string;
@@ -5438,6 +5452,14 @@ export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem';
5438
5452
  export type TimeOfDay = string;
5439
5453
  export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd';
5440
5454
  export type TimeZone = string;
5455
+ export interface TokenPruningConfig {
5456
+ /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */
5457
+ tokens_freq_ratio_threshold?: integer;
5458
+ /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */
5459
+ tokens_weight_threshold?: float;
5460
+ /** Whether to only score pruned tokens, vs only scoring kept tokens. */
5461
+ only_score_pruned_tokens?: boolean;
5462
+ }
5441
5463
  export interface TopLeftBottomRightGeoBounds {
5442
5464
  top_left: GeoLocation;
5443
5465
  bottom_right: GeoLocation;
@@ -5779,6 +5801,9 @@ export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase
5779
5801
  export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase {
5780
5802
  /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
5781
5803
  compression?: double;
5804
+ /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases).
5805
+ * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */
5806
+ execution_hint?: AggregationsTDigestExecutionHint;
5782
5807
  }
5783
5808
  export interface AggregationsBucketAggregationBase {
5784
5809
  }
@@ -6525,6 +6550,9 @@ export interface AggregationsMedianAbsoluteDeviationAggregate extends Aggregatio
6525
6550
  export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
6526
6551
  /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
6527
6552
  compression?: double;
6553
+ /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases).
6554
+ * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */
6555
+ execution_hint?: AggregationsTDigestExecutionHint;
6528
6556
  }
6529
6557
  export interface AggregationsMetricAggregationBase {
6530
6558
  /** The field on which to run the aggregation. */
@@ -6989,7 +7017,11 @@ export interface AggregationsSumBucketAggregation extends AggregationsPipelineAg
6989
7017
  export interface AggregationsTDigest {
6990
7018
  /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */
6991
7019
  compression?: integer;
7020
+ /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases).
7021
+ * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */
7022
+ execution_hint?: AggregationsTDigestExecutionHint;
6992
7023
  }
7024
+ export type AggregationsTDigestExecutionHint = 'default' | 'high_accuracy';
6993
7025
  export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
6994
7026
  }
6995
7027
  export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase {
@@ -8200,6 +8232,21 @@ export interface MappingByteNumberProperty extends MappingNumberPropertyBase {
8200
8232
  type: 'byte';
8201
8233
  null_value?: byte;
8202
8234
  }
8235
+ export interface MappingChunkingSettings {
8236
+ /** The chunking strategy: `sentence` or `word`. */
8237
+ strategy: string;
8238
+ /** The maximum size of a chunk in words.
8239
+ * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */
8240
+ max_chunk_size: integer;
8241
+ /** The number of overlapping words for chunks.
8242
+ * It is applicable only to a `word` chunking strategy.
8243
+ * This value cannot be higher than half the `max_chunk_size` value. */
8244
+ overlap?: integer;
8245
+ /** The number of overlapping sentences for chunks.
8246
+ * It is applicable only for a `sentence` chunking strategy.
8247
+ * It can be either `1` or `0`. */
8248
+ sentence_overlap?: integer;
8249
+ }
8203
8250
  export interface MappingCompletionProperty extends MappingDocValuesPropertyBase {
8204
8251
  analyzer?: string;
8205
8252
  contexts?: MappingSuggestContext[];
@@ -8279,6 +8326,15 @@ export interface MappingDenseVectorIndexOptions {
8279
8326
  m?: integer;
8280
8327
  /** The type of kNN algorithm to use. */
8281
8328
  type: MappingDenseVectorIndexOptionsType;
8329
+ /** The rescore vector options. This is only applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. */
8330
+ rescore_vector?: MappingDenseVectorIndexOptionsRescoreVector;
8331
+ }
8332
+ export interface MappingDenseVectorIndexOptionsRescoreVector {
8333
+ /** The oversampling factor to use when searching for the nearest neighbor. This is only applicable to the quantized formats: `bbq_*`, `int4_*`, and `int8_*`.
8334
+ * When provided, `oversample * k` vectors will be gathered and then their scores will be re-computed with the original vectors.
8335
+ *
8336
+ * valid values are between `1.0` and `10.0` (inclusive), or `0` exactly to disable oversampling. */
8337
+ oversample: float;
8282
8338
  }
8283
8339
  export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw';
8284
8340
  export interface MappingDenseVectorProperty extends MappingPropertyBase {
@@ -8635,6 +8691,10 @@ export interface MappingSemanticTextProperty {
8635
8691
  * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint.
8636
8692
  * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */
8637
8693
  search_inference_id?: Id;
8694
+ /** Settings for chunking text into smaller passages. If specified, these will override the
8695
+ * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated,
8696
+ * they will not be applied to existing documents until they are reindexed. */
8697
+ chunking_settings?: MappingChunkingSettings;
8638
8698
  }
8639
8699
  export interface MappingShapeProperty extends MappingDocValuesPropertyBase {
8640
8700
  coerce?: boolean;
@@ -8659,9 +8719,23 @@ export interface MappingSourceField {
8659
8719
  mode?: MappingSourceFieldMode;
8660
8720
  }
8661
8721
  export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic';
8722
+ export interface MappingSparseVectorIndexOptions {
8723
+ /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance.
8724
+ * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used.
8725
+ * Default: false */
8726
+ prune?: boolean;
8727
+ /** Optional pruning configuration.
8728
+ * If enabled, this will omit non-significant tokens from the query in order to improve query performance.
8729
+ * This is only used if prune is set to true.
8730
+ * If prune is set to true but pruning_config is not specified, default values will be used. */
8731
+ pruning_config?: TokenPruningConfig;
8732
+ }
8662
8733
  export interface MappingSparseVectorProperty extends MappingPropertyBase {
8663
8734
  store?: boolean;
8664
8735
  type: 'sparse_vector';
8736
+ /** Additional index options for the sparse vector field that controls the
8737
+ * token pruning behavior of the sparse vector field. */
8738
+ index_options?: MappingSparseVectorIndexOptions;
8665
8739
  }
8666
8740
  export type MappingSubobjects = boolean | 'true' | 'false' | 'auto';
8667
8741
  export interface MappingSuggestContext {
@@ -9855,15 +9929,13 @@ export interface QueryDslSparseVectorQuery extends QueryDslQueryBase {
9855
9929
  query?: string;
9856
9930
  /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance.
9857
9931
  * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used.
9858
- * Default: false
9859
- * @experimental */
9932
+ * Default: false */
9860
9933
  prune?: boolean;
9861
9934
  /** Optional pruning configuration.
9862
9935
  * If enabled, this will omit non-significant tokens from the query in order to improve query performance.
9863
9936
  * This is only used if prune is set to true.
9864
- * If prune is set to true but pruning_config is not specified, default values will be used.
9865
- * @experimental */
9866
- pruning_config?: QueryDslTokenPruningConfig;
9937
+ * If prune is set to true but pruning_config is not specified, default values will be used. */
9938
+ pruning_config?: TokenPruningConfig;
9867
9939
  }
9868
9940
  export interface QueryDslTermQuery extends QueryDslQueryBase {
9869
9941
  /** Term you wish to find in the provided field. */
@@ -9903,17 +9975,9 @@ export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
9903
9975
  model_text: string;
9904
9976
  /** Token pruning configurations
9905
9977
  * @experimental */
9906
- pruning_config?: QueryDslTokenPruningConfig;
9978
+ pruning_config?: TokenPruningConfig;
9907
9979
  }
9908
9980
  export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix';
9909
- export interface QueryDslTokenPruningConfig {
9910
- /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */
9911
- tokens_freq_ratio_threshold?: integer;
9912
- /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */
9913
- tokens_weight_threshold?: float;
9914
- /** Whether to only score pruned tokens, vs only scoring kept tokens. */
9915
- only_score_pruned_tokens?: boolean;
9916
- }
9917
9981
  export interface QueryDslTypeQuery extends QueryDslQueryBase {
9918
9982
  value: string;
9919
9983
  }
@@ -9932,9 +9996,9 @@ export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase<any> {
9932
9996
  }
9933
9997
  export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase {
9934
9998
  /** The tokens representing this query */
9935
- tokens: Record<string, float>;
9999
+ tokens: Record<string, float> | Record<string, float>[];
9936
10000
  /** Token pruning configurations */
9937
- pruning_config?: QueryDslTokenPruningConfig;
10001
+ pruning_config?: TokenPruningConfig;
9938
10002
  }
9939
10003
  export interface QueryDslWildcardQuery extends QueryDslQueryBase {
9940
10004
  /** Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */
@@ -16355,6 +16419,7 @@ export interface ClusterComponentTemplateSummary {
16355
16419
  mappings?: MappingTypeMapping;
16356
16420
  aliases?: Record<string, IndicesAliasDefinition>;
16357
16421
  lifecycle?: IndicesDataStreamLifecycleWithRollover;
16422
+ data_stream_options?: IndicesDataStreamOptionsTemplate | null;
16358
16423
  }
16359
16424
  export interface ClusterAllocationExplainAllocationDecision {
16360
16425
  decider: string;
@@ -19258,6 +19323,11 @@ export type EsqlTableValuesKeywordValue = string | string[];
19258
19323
  export type EsqlTableValuesLongDouble = double | double[];
19259
19324
  export type EsqlTableValuesLongValue = long | long[];
19260
19325
  export interface EsqlAsyncQueryRequest extends RequestBase {
19326
+ /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
19327
+ * If `false`, the query will fail if there are any failures.
19328
+ *
19329
+ * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */
19330
+ allow_partial_results?: boolean;
19261
19331
  /** The character to use between values within a CSV row.
19262
19332
  * It is valid only for the CSV format. */
19263
19333
  delimiter?: string;
@@ -19309,6 +19379,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
19309
19379
  body?: string | ({
19310
19380
  [key: string]: any;
19311
19381
  } & {
19382
+ allow_partial_results?: never;
19312
19383
  delimiter?: never;
19313
19384
  drop_null_columns?: never;
19314
19385
  format?: never;
@@ -19328,6 +19399,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase {
19328
19399
  querystring?: {
19329
19400
  [key: string]: any;
19330
19401
  } & {
19402
+ allow_partial_results?: never;
19331
19403
  delimiter?: never;
19332
19404
  drop_null_columns?: never;
19333
19405
  format?: never;
@@ -19428,6 +19500,51 @@ export interface EsqlAsyncQueryStopRequest extends RequestBase {
19428
19500
  };
19429
19501
  }
19430
19502
  export type EsqlAsyncQueryStopResponse = EsqlEsqlResult;
19503
+ export interface EsqlGetQueryRequest extends RequestBase {
19504
+ /** The query ID */
19505
+ id: Id;
19506
+ /** All values in `body` will be added to the request body. */
19507
+ body?: string | ({
19508
+ [key: string]: any;
19509
+ } & {
19510
+ id?: never;
19511
+ });
19512
+ /** All values in `querystring` will be added to the request querystring. */
19513
+ querystring?: {
19514
+ [key: string]: any;
19515
+ } & {
19516
+ id?: never;
19517
+ };
19518
+ }
19519
+ export interface EsqlGetQueryResponse {
19520
+ id: long;
19521
+ node: NodeId;
19522
+ start_time_millis: long;
19523
+ running_time_nanos: long;
19524
+ query: string;
19525
+ coordinating_node: NodeId;
19526
+ data_nodes: NodeId[];
19527
+ }
19528
+ export interface EsqlListQueriesBody {
19529
+ id: long;
19530
+ node: NodeId;
19531
+ start_time_millis: long;
19532
+ running_time_nanos: long;
19533
+ query: string;
19534
+ }
19535
+ export interface EsqlListQueriesRequest extends RequestBase {
19536
+ /** All values in `body` will be added to the request body. */
19537
+ body?: string | {
19538
+ [key: string]: any;
19539
+ };
19540
+ /** All values in `querystring` will be added to the request querystring. */
19541
+ querystring?: {
19542
+ [key: string]: any;
19543
+ };
19544
+ }
19545
+ export interface EsqlListQueriesResponse {
19546
+ queries: Record<TaskId, EsqlListQueriesBody>;
19547
+ }
19431
19548
  export interface EsqlQueryRequest extends RequestBase {
19432
19549
  /** A short version of the Accept header, e.g. json, yaml.
19433
19550
  *
@@ -19438,6 +19555,11 @@ export interface EsqlQueryRequest extends RequestBase {
19438
19555
  /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results?
19439
19556
  * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */
19440
19557
  drop_null_columns?: boolean;
19558
+ /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
19559
+ * If `false`, the query will fail if there are any failures.
19560
+ *
19561
+ * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */
19562
+ allow_partial_results?: boolean;
19441
19563
  /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */
19442
19564
  columnar?: boolean;
19443
19565
  /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */
@@ -19466,6 +19588,7 @@ export interface EsqlQueryRequest extends RequestBase {
19466
19588
  format?: never;
19467
19589
  delimiter?: never;
19468
19590
  drop_null_columns?: never;
19591
+ allow_partial_results?: never;
19469
19592
  columnar?: never;
19470
19593
  filter?: never;
19471
19594
  locale?: never;
@@ -19482,6 +19605,7 @@ export interface EsqlQueryRequest extends RequestBase {
19482
19605
  format?: never;
19483
19606
  delimiter?: never;
19484
19607
  drop_null_columns?: never;
19608
+ allow_partial_results?: never;
19485
19609
  columnar?: never;
19486
19610
  filter?: never;
19487
19611
  locale?: never;
@@ -20176,6 +20300,7 @@ export interface IlmExplainLifecycleLifecycleExplainManaged {
20176
20300
  step_time_millis?: EpochTime<UnitMillis>;
20177
20301
  phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution;
20178
20302
  time_since_index_creation?: Duration;
20303
+ skip: boolean;
20179
20304
  }
20180
20305
  export interface IlmExplainLifecycleLifecycleExplainPhaseExecution {
20181
20306
  phase_definition?: IlmPhase;
@@ -20515,6 +20640,9 @@ export interface IndicesDataStream {
20515
20640
  replicated?: boolean;
20516
20641
  /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */
20517
20642
  rollover_on_write: boolean;
20643
+ /** The settings specific to this data stream that will take precedence over the settings in the matching index
20644
+ * template. */
20645
+ settings: IndicesIndexSettings;
20518
20646
  /** Health status of the data stream.
20519
20647
  * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */
20520
20648
  status: HealthStatus;
@@ -20525,6 +20653,24 @@ export interface IndicesDataStream {
20525
20653
  template: Name;
20526
20654
  /** Information about the `@timestamp` field in the data stream. */
20527
20655
  timestamp_field: IndicesDataStreamTimestampField;
20656
+ /** The index mode for the data stream that will be used for newly created backing indices. */
20657
+ index_mode?: IndicesIndexMode;
20658
+ }
20659
+ export interface IndicesDataStreamFailureStore {
20660
+ /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
20661
+ * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
20662
+ * not remove any existing data from the failure store. */
20663
+ enabled?: boolean;
20664
+ /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
20665
+ lifecycle?: IndicesFailureStoreLifecycle;
20666
+ }
20667
+ export interface IndicesDataStreamFailureStoreTemplate {
20668
+ /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
20669
+ * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
20670
+ * not remove any existing data from the failure store. */
20671
+ enabled?: boolean | null;
20672
+ /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
20673
+ lifecycle?: IndicesFailureStoreLifecycleTemplate | null;
20528
20674
  }
20529
20675
  export interface IndicesDataStreamIndex {
20530
20676
  /** Name of the backing index. */
@@ -20537,6 +20683,8 @@ export interface IndicesDataStreamIndex {
20537
20683
  managed_by?: IndicesManagedBy;
20538
20684
  /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */
20539
20685
  prefer_ilm?: boolean;
20686
+ /** The index mode of this backing index of the data stream. */
20687
+ index_mode?: IndicesIndexMode;
20540
20688
  }
20541
20689
  export interface IndicesDataStreamLifecycle {
20542
20690
  /** If defined, every document added to this data stream will be stored at least for this time frame.
@@ -20571,6 +20719,13 @@ export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStrea
20571
20719
  * The contents of this field are subject to change. */
20572
20720
  rollover?: IndicesDataStreamLifecycleRolloverConditions;
20573
20721
  }
20722
+ export interface IndicesDataStreamOptions {
20723
+ /** If defined, it specifies configuration for the failure store of this data stream. */
20724
+ failure_store?: IndicesDataStreamFailureStore;
20725
+ }
20726
+ export interface IndicesDataStreamOptionsTemplate {
20727
+ failure_store?: IndicesDataStreamFailureStoreTemplate | null;
20728
+ }
20574
20729
  export interface IndicesDataStreamTimestampField {
20575
20730
  /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */
20576
20731
  name: Field;
@@ -20594,12 +20749,31 @@ export interface IndicesFailureStore {
20594
20749
  indices: IndicesDataStreamIndex[];
20595
20750
  rollover_on_write: boolean;
20596
20751
  }
20752
+ export interface IndicesFailureStoreLifecycle {
20753
+ /** If defined, every document added to this data stream will be stored at least for this time frame.
20754
+ * Any time after this duration the document could be deleted.
20755
+ * When empty, every document in this data stream will be stored indefinitely. */
20756
+ data_retention?: Duration;
20757
+ /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
20758
+ * that's disabled (enabled: `false`) will have no effect on the data stream. */
20759
+ enabled?: boolean;
20760
+ }
20761
+ export interface IndicesFailureStoreLifecycleTemplate {
20762
+ /** If defined, every document added to this data stream will be stored at least for this time frame.
20763
+ * Any time after this duration the document could be deleted.
20764
+ * When empty, every document in this data stream will be stored indefinitely. */
20765
+ data_retention?: Duration | null;
20766
+ /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
20767
+ * that's disabled (enabled: `false`) will have no effect on the data stream. */
20768
+ enabled?: boolean;
20769
+ }
20597
20770
  export interface IndicesFielddataFrequencyFilter {
20598
20771
  max: double;
20599
20772
  min: double;
20600
20773
  min_segment_size: integer;
20601
20774
  }
20602
20775
  export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum';
20776
+ export type IndicesIndexMode = 'standard' | 'time_series' | 'logsdb' | 'lookup';
20603
20777
  export interface IndicesIndexRouting {
20604
20778
  allocation?: IndicesIndexRoutingAllocation;
20605
20779
  rebalance?: IndicesIndexRoutingRebalance;
@@ -20807,6 +20981,7 @@ export interface IndicesIndexTemplateSummary {
20807
20981
  /** Configuration options for the index. */
20808
20982
  settings?: IndicesIndexSettings;
20809
20983
  lifecycle?: IndicesDataStreamLifecycleWithRollover;
20984
+ data_stream_options?: IndicesDataStreamOptionsTemplate | null;
20810
20985
  }
20811
20986
  export interface IndicesIndexVersioning {
20812
20987
  created?: VersionString;
@@ -20832,6 +21007,7 @@ export interface IndicesIndexingSlowlogTresholds {
20832
21007
  * Log and the thresholds are configured in the same way as the search slowlog. */
20833
21008
  index?: IndicesSlowlogTresholdLevels;
20834
21009
  }
21010
+ export type IndicesIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write';
20835
21011
  export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged';
20836
21012
  export interface IndicesMappingLimitSettings {
20837
21013
  coerce?: boolean;
@@ -21035,8 +21211,7 @@ export interface IndicesTranslogRetention {
21035
21211
  * indices created in Elasticsearch versions 7.0.0 and later. */
21036
21212
  age?: Duration;
21037
21213
  }
21038
- export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write';
21039
- export interface IndicesAddBlockIndicesBlockStatus {
21214
+ export interface IndicesAddBlockAddIndicesBlockStatus {
21040
21215
  name: IndexName;
21041
21216
  blocked: boolean;
21042
21217
  }
@@ -21047,7 +21222,7 @@ export interface IndicesAddBlockRequest extends RequestBase {
21047
21222
  * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */
21048
21223
  index: IndexName;
21049
21224
  /** The block type to add to the index. */
21050
- block: IndicesAddBlockIndicesBlockOptions;
21225
+ block: IndicesIndicesBlockOptions;
21051
21226
  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
21052
21227
  * This behavior applies even if the request targets other open indices.
21053
21228
  * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
@@ -21094,7 +21269,7 @@ export interface IndicesAddBlockRequest extends RequestBase {
21094
21269
  export interface IndicesAddBlockResponse {
21095
21270
  acknowledged: boolean;
21096
21271
  shards_acknowledged: boolean;
21097
- indices: IndicesAddBlockIndicesBlockStatus[];
21272
+ indices: IndicesAddBlockAddIndicesBlockStatus[];
21098
21273
  }
21099
21274
  export interface IndicesAnalyzeAnalyzeDetail {
21100
21275
  analyzer?: IndicesAnalyzeAnalyzerDetail;
@@ -21690,6 +21865,35 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase {
21690
21865
  };
21691
21866
  }
21692
21867
  export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase;
21868
+ export interface IndicesDeleteDataStreamOptionsRequest extends RequestBase {
21869
+ /** A comma-separated list of data streams of which the data stream options will be deleted; use `*` to get all data streams */
21870
+ name: DataStreamNames;
21871
+ /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */
21872
+ expand_wildcards?: ExpandWildcards;
21873
+ /** Specify timeout for connection to master */
21874
+ master_timeout?: Duration;
21875
+ /** Explicit timestamp for the document */
21876
+ timeout?: Duration;
21877
+ /** All values in `body` will be added to the request body. */
21878
+ body?: string | ({
21879
+ [key: string]: any;
21880
+ } & {
21881
+ name?: never;
21882
+ expand_wildcards?: never;
21883
+ master_timeout?: never;
21884
+ timeout?: never;
21885
+ });
21886
+ /** All values in `querystring` will be added to the request querystring. */
21887
+ querystring?: {
21888
+ [key: string]: any;
21889
+ } & {
21890
+ name?: never;
21891
+ expand_wildcards?: never;
21892
+ master_timeout?: never;
21893
+ timeout?: never;
21894
+ };
21895
+ }
21896
+ export type IndicesDeleteDataStreamOptionsResponse = AcknowledgedResponseBase;
21693
21897
  export interface IndicesDeleteIndexTemplateRequest extends RequestBase {
21694
21898
  /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */
21695
21899
  name: Names;
@@ -22379,6 +22583,74 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
22379
22583
  export interface IndicesGetDataStreamResponse {
22380
22584
  data_streams: IndicesDataStream[];
22381
22585
  }
22586
+ export interface IndicesGetDataStreamOptionsDataStreamWithOptions {
22587
+ name: DataStreamName;
22588
+ options?: IndicesDataStreamOptions;
22589
+ }
22590
+ export interface IndicesGetDataStreamOptionsRequest extends RequestBase {
22591
+ /** Comma-separated list of data streams to limit the request.
22592
+ * Supports wildcards (`*`).
22593
+ * To target all data streams, omit this parameter or use `*` or `_all`. */
22594
+ name: DataStreamNames;
22595
+ /** Type of data stream that wildcard patterns can match.
22596
+ * Supports comma-separated values, such as `open,hidden`. */
22597
+ expand_wildcards?: ExpandWildcards;
22598
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
22599
+ master_timeout?: Duration;
22600
+ /** All values in `body` will be added to the request body. */
22601
+ body?: string | ({
22602
+ [key: string]: any;
22603
+ } & {
22604
+ name?: never;
22605
+ expand_wildcards?: never;
22606
+ master_timeout?: never;
22607
+ });
22608
+ /** All values in `querystring` will be added to the request querystring. */
22609
+ querystring?: {
22610
+ [key: string]: any;
22611
+ } & {
22612
+ name?: never;
22613
+ expand_wildcards?: never;
22614
+ master_timeout?: never;
22615
+ };
22616
+ }
22617
+ export interface IndicesGetDataStreamOptionsResponse {
22618
+ data_streams: IndicesGetDataStreamOptionsDataStreamWithOptions[];
22619
+ }
22620
+ export interface IndicesGetDataStreamSettingsDataStreamSettings {
22621
+ /** The name of the data stream. */
22622
+ name: string;
22623
+ /** The settings specific to this data stream */
22624
+ settings: IndicesIndexSettings;
22625
+ /** The settings specific to this data stream merged with the settings from its template. These `effective_settings`
22626
+ * are the settings that will be used when a new index is created for this data stream. */
22627
+ effective_settings: IndicesIndexSettings;
22628
+ }
22629
+ export interface IndicesGetDataStreamSettingsRequest extends RequestBase {
22630
+ /** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */
22631
+ name: Indices;
22632
+ /** The period to wait for a connection to the master node. If no response is
22633
+ * received before the timeout expires, the request fails and returns an
22634
+ * error. */
22635
+ master_timeout?: Duration;
22636
+ /** All values in `body` will be added to the request body. */
22637
+ body?: string | ({
22638
+ [key: string]: any;
22639
+ } & {
22640
+ name?: never;
22641
+ master_timeout?: never;
22642
+ });
22643
+ /** All values in `querystring` will be added to the request querystring. */
22644
+ querystring?: {
22645
+ [key: string]: any;
22646
+ } & {
22647
+ name?: never;
22648
+ master_timeout?: never;
22649
+ };
22650
+ }
22651
+ export interface IndicesGetDataStreamSettingsResponse {
22652
+ data_streams: IndicesGetDataStreamSettingsDataStreamSettings[];
22653
+ }
22382
22654
  export interface IndicesGetFieldMappingRequest extends RequestBase {
22383
22655
  /** Comma-separated list or wildcard expression of fields used to limit returned information.
22384
22656
  * Supports wildcards (`*`). */
@@ -22398,8 +22670,6 @@ export interface IndicesGetFieldMappingRequest extends RequestBase {
22398
22670
  ignore_unavailable?: boolean;
22399
22671
  /** If `true`, return all default settings in the response. */
22400
22672
  include_defaults?: boolean;
22401
- /** If `true`, the request retrieves information from the local node only. */
22402
- local?: boolean;
22403
22673
  /** All values in `body` will be added to the request body. */
22404
22674
  body?: string | ({
22405
22675
  [key: string]: any;
@@ -22410,7 +22680,6 @@ export interface IndicesGetFieldMappingRequest extends RequestBase {
22410
22680
  expand_wildcards?: never;
22411
22681
  ignore_unavailable?: never;
22412
22682
  include_defaults?: never;
22413
- local?: never;
22414
22683
  });
22415
22684
  /** All values in `querystring` will be added to the request querystring. */
22416
22685
  querystring?: {
@@ -22422,7 +22691,6 @@ export interface IndicesGetFieldMappingRequest extends RequestBase {
22422
22691
  expand_wildcards?: never;
22423
22692
  ignore_unavailable?: never;
22424
22693
  include_defaults?: never;
22425
- local?: never;
22426
22694
  };
22427
22695
  }
22428
22696
  export type IndicesGetFieldMappingResponse = Record<IndexName, IndicesGetFieldMappingTypeFieldMappings>;
@@ -22917,6 +23185,114 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase {
22917
23185
  };
22918
23186
  }
22919
23187
  export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase;
23188
+ export interface IndicesPutDataStreamOptionsRequest extends RequestBase {
23189
+ /** Comma-separated list of data streams used to limit the request.
23190
+ * Supports wildcards (`*`).
23191
+ * To target all data streams use `*` or `_all`. */
23192
+ name: DataStreamNames;
23193
+ /** Type of data stream that wildcard patterns can match.
23194
+ * Supports comma-separated values, such as `open,hidden`. */
23195
+ expand_wildcards?: ExpandWildcards;
23196
+ /** Period to wait for a connection to the master node. If no response is
23197
+ * received before the timeout expires, the request fails and returns an
23198
+ * error. */
23199
+ master_timeout?: Duration;
23200
+ /** Period to wait for a response.
23201
+ * If no response is received before the timeout expires, the request fails and returns an error. */
23202
+ timeout?: Duration;
23203
+ /** If defined, it will update the failure store configuration of every data stream resolved by the name expression. */
23204
+ failure_store?: IndicesDataStreamFailureStore;
23205
+ /** All values in `body` will be added to the request body. */
23206
+ body?: string | ({
23207
+ [key: string]: any;
23208
+ } & {
23209
+ name?: never;
23210
+ expand_wildcards?: never;
23211
+ master_timeout?: never;
23212
+ timeout?: never;
23213
+ failure_store?: never;
23214
+ });
23215
+ /** All values in `querystring` will be added to the request querystring. */
23216
+ querystring?: {
23217
+ [key: string]: any;
23218
+ } & {
23219
+ name?: never;
23220
+ expand_wildcards?: never;
23221
+ master_timeout?: never;
23222
+ timeout?: never;
23223
+ failure_store?: never;
23224
+ };
23225
+ }
23226
+ export type IndicesPutDataStreamOptionsResponse = AcknowledgedResponseBase;
23227
+ export interface IndicesPutDataStreamSettingsDataStreamSettingsError {
23228
+ index: IndexName;
23229
+ /** A message explaining why the settings could not be applied to specific indices. */
23230
+ error: string;
23231
+ }
23232
+ export interface IndicesPutDataStreamSettingsIndexSettingResults {
23233
+ /** The list of settings that were applied to the data stream but not to backing indices. These will be applied to
23234
+ * the write index the next time the data stream is rolled over. */
23235
+ applied_to_data_stream_only: string[];
23236
+ /** The list of settings that were applied to the data stream and to all of its backing indices. These settings will
23237
+ * also be applied to the write index the next time the data stream is rolled over. */
23238
+ applied_to_data_stream_and_backing_indices: string[];
23239
+ errors?: IndicesPutDataStreamSettingsDataStreamSettingsError[];
23240
+ }
23241
+ export interface IndicesPutDataStreamSettingsRequest extends RequestBase {
23242
+ /** A comma-separated list of data streams or data stream patterns. */
23243
+ name: Indices;
23244
+ /** If `true`, the request does not actually change the settings on any data streams or indices. Instead, it
23245
+ * simulates changing the settings and reports back to the user what would have happened had these settings
23246
+ * actually been applied. */
23247
+ dry_run?: boolean;
23248
+ /** The period to wait for a connection to the master node. If no response is
23249
+ * received before the timeout expires, the request fails and returns an
23250
+ * error. */
23251
+ master_timeout?: Duration;
23252
+ /** The period to wait for a response. If no response is received before the
23253
+ * timeout expires, the request fails and returns an error. */
23254
+ timeout?: Duration;
23255
+ settings?: IndicesIndexSettings;
23256
+ /** All values in `body` will be added to the request body. */
23257
+ body?: string | ({
23258
+ [key: string]: any;
23259
+ } & {
23260
+ name?: never;
23261
+ dry_run?: never;
23262
+ master_timeout?: never;
23263
+ timeout?: never;
23264
+ settings?: never;
23265
+ });
23266
+ /** All values in `querystring` will be added to the request querystring. */
23267
+ querystring?: {
23268
+ [key: string]: any;
23269
+ } & {
23270
+ name?: never;
23271
+ dry_run?: never;
23272
+ master_timeout?: never;
23273
+ timeout?: never;
23274
+ settings?: never;
23275
+ };
23276
+ }
23277
+ export interface IndicesPutDataStreamSettingsResponse {
23278
+ data_streams: IndicesPutDataStreamSettingsUpdatedDataStreamSettings[];
23279
+ }
23280
+ export interface IndicesPutDataStreamSettingsUpdatedDataStreamSettings {
23281
+ /** The data stream name. */
23282
+ name: IndexName;
23283
+ /** If the settings were successfully applied to the data stream (or would have been, if running in `dry_run`
23284
+ * mode), it is `true`. If an error occurred, it is `false`. */
23285
+ applied_to_data_stream: boolean;
23286
+ /** A message explaining why the settings could not be applied to the data stream. */
23287
+ error?: string;
23288
+ /** The settings that are specfic to this data stream that will override any settings from the matching index template. */
23289
+ settings: IndicesIndexSettings;
23290
+ /** The settings that are effective on this data stream, taking into account the settings from the matching index
23291
+ * template and the settings specific to this data stream. */
23292
+ effective_settings: IndicesIndexSettings;
23293
+ /** Information about whether and where each setting was applied. */
23294
+ index_settings_results: IndicesPutDataStreamSettingsIndexSettingResults;
23295
+ }
22920
23296
  export interface IndicesPutIndexTemplateIndexTemplateMapping {
22921
23297
  /** Aliases to add.
22922
23298
  * If the index template includes a `data_stream` object, these are data stream aliases.
@@ -23450,6 +23826,66 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase {
23450
23826
  };
23451
23827
  }
23452
23828
  export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult;
23829
+ export interface IndicesRemoveBlockRemoveIndicesBlockStatus {
23830
+ name: IndexName;
23831
+ unblocked?: boolean;
23832
+ exception?: ErrorCause;
23833
+ }
23834
+ export interface IndicesRemoveBlockRequest extends RequestBase {
23835
+ /** A comma-separated list or wildcard expression of index names used to limit the request.
23836
+ * By default, you must explicitly name the indices you are removing blocks from.
23837
+ * To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
23838
+ * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */
23839
+ index: IndexName;
23840
+ /** The block type to remove from the index. */
23841
+ block: IndicesIndicesBlockOptions;
23842
+ /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
23843
+ * This behavior applies even if the request targets other open indices.
23844
+ * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
23845
+ allow_no_indices?: boolean;
23846
+ /** The type of index that wildcard patterns can match.
23847
+ * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
23848
+ * It supports comma-separated values, such as `open,hidden`. */
23849
+ expand_wildcards?: ExpandWildcards;
23850
+ /** If `false`, the request returns an error if it targets a missing or closed index. */
23851
+ ignore_unavailable?: boolean;
23852
+ /** The period to wait for the master node.
23853
+ * If the master node is not available before the timeout expires, the request fails and returns an error.
23854
+ * It can also be set to `-1` to indicate that the request should never timeout. */
23855
+ master_timeout?: Duration;
23856
+ /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
23857
+ * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
23858
+ * It can also be set to `-1` to indicate that the request should never timeout. */
23859
+ timeout?: Duration;
23860
+ /** All values in `body` will be added to the request body. */
23861
+ body?: string | ({
23862
+ [key: string]: any;
23863
+ } & {
23864
+ index?: never;
23865
+ block?: never;
23866
+ allow_no_indices?: never;
23867
+ expand_wildcards?: never;
23868
+ ignore_unavailable?: never;
23869
+ master_timeout?: never;
23870
+ timeout?: never;
23871
+ });
23872
+ /** All values in `querystring` will be added to the request querystring. */
23873
+ querystring?: {
23874
+ [key: string]: any;
23875
+ } & {
23876
+ index?: never;
23877
+ block?: never;
23878
+ allow_no_indices?: never;
23879
+ expand_wildcards?: never;
23880
+ ignore_unavailable?: never;
23881
+ master_timeout?: never;
23882
+ timeout?: never;
23883
+ };
23884
+ }
23885
+ export interface IndicesRemoveBlockResponse {
23886
+ acknowledged: boolean;
23887
+ indices: IndicesRemoveBlockRemoveIndicesBlockStatus[];
23888
+ }
23453
23889
  export interface IndicesResolveClusterRequest extends RequestBase {
23454
23890
  /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve.
23455
23891
  * Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax.
@@ -24518,6 +24954,51 @@ export interface InferenceAmazonBedrockTaskSettings {
24518
24954
  top_p?: float;
24519
24955
  }
24520
24956
  export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding';
24957
+ export type InferenceAmazonSageMakerApi = 'openai' | 'elastic';
24958
+ export interface InferenceAmazonSageMakerServiceSettings {
24959
+ /** A valid AWS access key that has permissions to use Amazon SageMaker and access to models for invoking requests. */
24960
+ access_key: string;
24961
+ /** The name of the SageMaker endpoint. */
24962
+ endpoint_name: string;
24963
+ /** The API format to use when calling SageMaker.
24964
+ * Elasticsearch will convert the POST _inference request to this data format when invoking the SageMaker endpoint. */
24965
+ api: InferenceAmazonSageMakerApi;
24966
+ /** The region that your endpoint or Amazon Resource Name (ARN) is deployed in.
24967
+ * The list of available regions per model can be found in the Amazon SageMaker documentation. */
24968
+ region: string;
24969
+ /** A valid AWS secret key that is paired with the `access_key`.
24970
+ * For information about creating and managing access and secret keys, refer to the AWS documentation. */
24971
+ secret_key: string;
24972
+ /** The model ID when calling a multi-model endpoint. */
24973
+ target_model?: string;
24974
+ /** The container to directly invoke when calling a multi-container endpoint. */
24975
+ target_container_hostname?: string;
24976
+ /** The inference component to directly invoke when calling a multi-component endpoint. */
24977
+ inference_component_name?: string;
24978
+ /** The maximum number of inputs in each batch. This value is used by inference ingestion pipelines
24979
+ * when processing semantic values. It correlates to the number of times the SageMaker endpoint is
24980
+ * invoked (one per batch of input). */
24981
+ batch_size?: integer;
24982
+ /** The number of dimensions returned by the text embedding models. If this value is not provided, then
24983
+ * it is guessed by making invoking the endpoint for the `text_embedding` task. */
24984
+ dimensions?: integer;
24985
+ }
24986
+ export type InferenceAmazonSageMakerServiceType = 'amazon_sagemaker';
24987
+ export interface InferenceAmazonSageMakerTaskSettings {
24988
+ /** The AWS custom attributes passed verbatim through to the model running in the SageMaker Endpoint.
24989
+ * Values will be returned in the `X-elastic-sagemaker-custom-attributes` header. */
24990
+ custom_attributes?: string;
24991
+ /** The optional JMESPath expression used to override the EnableExplanations provided during endpoint creation. */
24992
+ enable_explanations?: string;
24993
+ /** The capture data ID when enabled in the endpoint. */
24994
+ inference_id?: string;
24995
+ /** The stateful session identifier for a new or existing session.
24996
+ * New sessions will be returned in the `X-elastic-sagemaker-new-session-id` header.
24997
+ * Closed sessions will be returned in the `X-elastic-sagemaker-closed-session-id` header. */
24998
+ session_id?: string;
24999
+ /** Specifies the variant when running with multi-variant Endpoints. */
25000
+ target_variant?: string;
25001
+ }
24521
25002
  export interface InferenceAnthropicServiceSettings {
24522
25003
  /** A valid API key for the Anthropic API. */
24523
25004
  api_key: string;
@@ -24634,7 +25115,7 @@ export interface InferenceAzureOpenAITaskSettings {
24634
25115
  user?: string;
24635
25116
  }
24636
25117
  export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding';
24637
- export type InferenceCohereEmbeddingType = 'byte' | 'float' | 'int8';
25118
+ export type InferenceCohereEmbeddingType = 'binary' | 'bit' | 'byte' | 'float' | 'int8';
24638
25119
  export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search';
24639
25120
  export interface InferenceCohereServiceSettings {
24640
25121
  /** A valid API key for your Cohere account.
@@ -24646,6 +25127,8 @@ export interface InferenceCohereServiceSettings {
24646
25127
  * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
24647
25128
  api_key: string;
24648
25129
  /** For a `text_embedding` task, the types of embeddings you want to get back.
25130
+ * Use `binary` for binary embeddings, which are encoded as bytes with signed int8 precision.
25131
+ * Use `bit` for binary embeddings, which are encoded as bytes with signed int8 precision (this is a synonym of `binary`).
24649
25132
  * Use `byte` for signed int8 embeddings (this is a synonym of `int8`).
24650
25133
  * Use `float` for the default float embeddings.
24651
25134
  * Use `int8` for signed int8 embeddings. */
@@ -24735,6 +25218,240 @@ export interface InferenceContentObject {
24735
25218
  /** The type of content. */
24736
25219
  type: string;
24737
25220
  }
25221
+ export interface InferenceCustomRequestParams {
25222
+ /** The body structure of the request. It requires passing in the string-escaped result of the JSON format HTTP request body.
25223
+ * For example:
25224
+ * ```
25225
+ * "request": "{\"input\":${input}}"
25226
+ * ```
25227
+ * > info
25228
+ * > The content string needs to be a single line except when using the Kibana console. */
25229
+ content: string;
25230
+ }
25231
+ export interface InferenceCustomResponseParams {
25232
+ /** Specifies the JSON parser that is used to parse the response from the custom service.
25233
+ * Different task types require different json_parser parameters.
25234
+ * For example:
25235
+ * ```
25236
+ * # text_embedding
25237
+ * # For a response like this:
25238
+ *
25239
+ * {
25240
+ * "object": "list",
25241
+ * "data": [
25242
+ * {
25243
+ * "object": "embedding",
25244
+ * "index": 0,
25245
+ * "embedding": [
25246
+ * 0.014539449,
25247
+ * -0.015288644
25248
+ * ]
25249
+ * }
25250
+ * ],
25251
+ * "model": "text-embedding-ada-002-v2",
25252
+ * "usage": {
25253
+ * "prompt_tokens": 8,
25254
+ * "total_tokens": 8
25255
+ * }
25256
+ * }
25257
+ *
25258
+ * # the json_parser definition should look like this:
25259
+ *
25260
+ * "response":{
25261
+ * "json_parser":{
25262
+ * "text_embeddings":"$.data[*].embedding[*]"
25263
+ * }
25264
+ * }
25265
+ *
25266
+ * # sparse_embedding
25267
+ * # For a response like this:
25268
+ *
25269
+ * {
25270
+ * "request_id": "75C50B5B-E79E-4930-****-F48DBB392231",
25271
+ * "latency": 22,
25272
+ * "usage": {
25273
+ * "token_count": 11
25274
+ * },
25275
+ * "result": {
25276
+ * "sparse_embeddings": [
25277
+ * {
25278
+ * "index": 0,
25279
+ * "embedding": [
25280
+ * {
25281
+ * "token_id": 6,
25282
+ * "weight": 0.101
25283
+ * },
25284
+ * {
25285
+ * "token_id": 163040,
25286
+ * "weight": 0.28417
25287
+ * }
25288
+ * ]
25289
+ * }
25290
+ * ]
25291
+ * }
25292
+ * }
25293
+ *
25294
+ * # the json_parser definition should look like this:
25295
+ *
25296
+ * "response":{
25297
+ * "json_parser":{
25298
+ * "token_path":"$.result.sparse_embeddings[*].embedding[*].token_id",
25299
+ * "weight_path":"$.result.sparse_embeddings[*].embedding[*].weight"
25300
+ * }
25301
+ * }
25302
+ *
25303
+ * # rerank
25304
+ * # For a response like this:
25305
+ *
25306
+ * {
25307
+ * "results": [
25308
+ * {
25309
+ * "index": 3,
25310
+ * "relevance_score": 0.999071,
25311
+ * "document": "abc"
25312
+ * },
25313
+ * {
25314
+ * "index": 4,
25315
+ * "relevance_score": 0.7867867,
25316
+ * "document": "123"
25317
+ * },
25318
+ * {
25319
+ * "index": 0,
25320
+ * "relevance_score": 0.32713068,
25321
+ * "document": "super"
25322
+ * }
25323
+ * ],
25324
+ * }
25325
+ *
25326
+ * # the json_parser definition should look like this:
25327
+ *
25328
+ * "response":{
25329
+ * "json_parser":{
25330
+ * "reranked_index":"$.result.scores[*].index", // optional
25331
+ * "relevance_score":"$.result.scores[*].score",
25332
+ * "document_text":"xxx" // optional
25333
+ * }
25334
+ * }
25335
+ *
25336
+ * # completion
25337
+ * # For a response like this:
25338
+ *
25339
+ * {
25340
+ * "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT",
25341
+ * "object": "chat.completion",
25342
+ * "created": 1741569952,
25343
+ * "model": "gpt-4.1-2025-04-14",
25344
+ * "choices": [
25345
+ * {
25346
+ * "index": 0,
25347
+ * "message": {
25348
+ * "role": "assistant",
25349
+ * "content": "Hello! How can I assist you today?",
25350
+ * "refusal": null,
25351
+ * "annotations": []
25352
+ * },
25353
+ * "logprobs": null,
25354
+ * "finish_reason": "stop"
25355
+ * }
25356
+ * ]
25357
+ * }
25358
+ *
25359
+ * # the json_parser definition should look like this:
25360
+ *
25361
+ * "response":{
25362
+ * "json_parser":{
25363
+ * "completion_result":"$.choices[*].message.content"
25364
+ * }
25365
+ * } */
25366
+ json_parser: any;
25367
+ }
25368
+ export interface InferenceCustomServiceSettings {
25369
+ /** Specifies the HTTPS header parameters – such as `Authentication` or `Contet-Type` – that are required to access the custom service.
25370
+ * For example:
25371
+ * ```
25372
+ * "headers":{
25373
+ * "Authorization": "Bearer ${api_key}",
25374
+ * "Content-Type": "application/json;charset=utf-8"
25375
+ * }
25376
+ * ``` */
25377
+ headers?: any;
25378
+ /** Specifies the input type translation values that are used to replace the `${input_type}` template in the request body.
25379
+ * For example:
25380
+ * ```
25381
+ * "input_type": {
25382
+ * "translation": {
25383
+ * "ingest": "do_ingest",
25384
+ * "search": "do_search"
25385
+ * },
25386
+ * "default": "a_default"
25387
+ * },
25388
+ * ```
25389
+ * If the subsequent inference requests come from a search context, the `search` key will be used and the template will be replaced with `do_search`.
25390
+ * If it comes from the ingest context `do_ingest` is used. If it's a different context that is not specified, the default value will be used. If no default is specified an empty string is used.
25391
+ * `translation` can be:
25392
+ * * `classification`
25393
+ * * `clustering`
25394
+ * * `ingest`
25395
+ * * `search` */
25396
+ input_type?: any;
25397
+ /** Specifies the query parameters as a list of tuples. The arrays inside the `query_parameters` must have two items, a key and a value.
25398
+ * For example:
25399
+ * ```
25400
+ * "query_parameters":[
25401
+ * ["param_key", "some_value"],
25402
+ * ["param_key", "another_value"],
25403
+ * ["other_key", "other_value"]
25404
+ * ]
25405
+ * ```
25406
+ * If the base url is `https://www.elastic.co` it results in: `https://www.elastic.co?param_key=some_value&param_key=another_value&other_key=other_value`. */
25407
+ query_parameters?: any;
25408
+ /** The request configuration object. */
25409
+ request: InferenceCustomRequestParams;
25410
+ /** The response configuration object. */
25411
+ response: InferenceCustomResponseParams;
25412
+ /** Specifies secret parameters, like `api_key` or `api_token`, that are required to access the custom service.
25413
+ * For example:
25414
+ * ```
25415
+ * "secret_parameters":{
25416
+ * "api_key":"<api_key>"
25417
+ * }
25418
+ * ``` */
25419
+ secret_parameters: any;
25420
+ /** The URL endpoint to use for the requests. */
25421
+ url?: string;
25422
+ }
25423
+ export type InferenceCustomServiceType = 'custom';
25424
+ export interface InferenceCustomTaskSettings {
25425
+ /** Specifies parameters that are required to run the custom service. The parameters depend on the model your custom service uses.
25426
+ * For example:
25427
+ * ```
25428
+ * "task_settings":{
25429
+ * "parameters":{
25430
+ * "input_type":"query",
25431
+ * "return_token":true
25432
+ * }
25433
+ * }
25434
+ * ``` */
25435
+ parameters?: any;
25436
+ }
25437
+ export type InferenceCustomTaskType = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion';
25438
+ export interface InferenceDeepSeekServiceSettings {
25439
+ /** A valid API key for your DeepSeek account.
25440
+ * You can find or create your DeepSeek API keys on the DeepSeek API key page.
25441
+ *
25442
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
25443
+ * The get inference endpoint API does not retrieve your API key.
25444
+ * After creating the inference model, you cannot change the associated API key.
25445
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
25446
+ api_key: string;
25447
+ /** For a `completion` or `chat_completion` task, the name of the model to use for the inference task.
25448
+ *
25449
+ * For the available `completion` and `chat_completion` models, refer to the [DeepSeek Models & Pricing docs](https://api-docs.deepseek.com/quick_start/pricing). */
25450
+ model_id: string;
25451
+ /** The URL endpoint to use for the requests. Defaults to `https://api.deepseek.com/chat/completions`. */
25452
+ url?: string;
25453
+ }
25454
+ export type InferenceDeepSeekServiceType = 'deepseek';
24738
25455
  export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase {
24739
25456
  pipelines: string[];
24740
25457
  }
@@ -24827,7 +25544,7 @@ export interface InferenceGoogleVertexAITaskSettings {
24827
25544
  /** For a `rerank` task, the number of the top N documents that should be returned. */
24828
25545
  top_n?: integer;
24829
25546
  }
24830
- export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding';
25547
+ export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion';
24831
25548
  export interface InferenceHuggingFaceServiceSettings {
24832
25549
  /** A valid access token for your HuggingFace account.
24833
25550
  * You can create or find your access tokens on the HuggingFace settings page.
@@ -24838,13 +25555,28 @@ export interface InferenceHuggingFaceServiceSettings {
24838
25555
  * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
24839
25556
  api_key: string;
24840
25557
  /** This setting helps to minimize the number of rate limit errors returned from Hugging Face.
24841
- * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000. */
25558
+ * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000 for all supported tasks.
25559
+ * Hugging Face does not publish a universal rate limit — actual limits may vary.
25560
+ * It is recommended to adjust this value based on the capacity and limits of your specific deployment environment. */
24842
25561
  rate_limit?: InferenceRateLimitSetting;
24843
- /** The URL endpoint to use for the requests. */
25562
+ /** The URL endpoint to use for the requests.
25563
+ * For `completion` and `chat_completion` tasks, the deployed model must be compatible with the Hugging Face Chat Completion interface (see the linked external documentation for details). The endpoint URL for the request must include `/v1/chat/completions`.
25564
+ * If the model supports the OpenAI Chat Completion schema, a toggle should appear in the interface. Enabling this toggle doesn't change any model behavior, it reveals the full endpoint URL needed (which should include `/v1/chat/completions`) when configuring the inference endpoint in Elasticsearch. If the model doesn't support this schema, the toggle may not be shown. */
24844
25565
  url: string;
25566
+ /** The name of the HuggingFace model to use for the inference task.
25567
+ * For `completion` and `chat_completion` tasks, this field is optional but may be required for certain models — particularly when using serverless inference endpoints.
25568
+ * For the `text_embedding` task, this field should not be included. Otherwise, the request will fail. */
25569
+ model_id?: string;
24845
25570
  }
24846
25571
  export type InferenceHuggingFaceServiceType = 'hugging_face';
24847
- export type InferenceHuggingFaceTaskType = 'text_embedding';
25572
+ export interface InferenceHuggingFaceTaskSettings {
25573
+ /** For a `rerank` task, return doc text within the results. */
25574
+ return_documents?: boolean;
25575
+ /** For a `rerank` task, the number of most relevant documents to return.
25576
+ * It defaults to the number of the documents. */
25577
+ top_n?: integer;
25578
+ }
25579
+ export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding';
24848
25580
  export interface InferenceInferenceChunkingSettings {
24849
25581
  /** The maximum size of a chunk in words.
24850
25582
  * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */
@@ -24888,6 +25620,12 @@ export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceIn
24888
25620
  /** The task type */
24889
25621
  task_type: InferenceTaskTypeAmazonBedrock;
24890
25622
  }
25623
+ export interface InferenceInferenceEndpointInfoAmazonSageMaker extends InferenceInferenceEndpoint {
25624
+ /** The inference Id */
25625
+ inference_id: string;
25626
+ /** The task type */
25627
+ task_type: InferenceTaskTypeAmazonSageMaker;
25628
+ }
24891
25629
  export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
24892
25630
  /** The inference Id */
24893
25631
  inference_id: string;
@@ -24912,6 +25650,18 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference
24912
25650
  /** The task type */
24913
25651
  task_type: InferenceTaskTypeCohere;
24914
25652
  }
25653
+ export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint {
25654
+ /** The inference Id */
25655
+ inference_id: string;
25656
+ /** The task type */
25657
+ task_type: InferenceTaskTypeCustom;
25658
+ }
25659
+ export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint {
25660
+ /** The inference Id */
25661
+ inference_id: string;
25662
+ /** The task type */
25663
+ task_type: InferenceTaskTypeDeepSeek;
25664
+ }
24915
25665
  export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
24916
25666
  /** The inference Id */
24917
25667
  inference_id: string;
@@ -25077,14 +25827,14 @@ export interface InferenceMistralServiceSettings {
25077
25827
  /** The maximum number of tokens per input before chunking occurs. */
25078
25828
  max_input_tokens?: integer;
25079
25829
  /** The name of the model to use for the inference task.
25080
- * Refer to the Mistral models documentation for the list of available text embedding models. */
25830
+ * Refer to the Mistral models documentation for the list of available models. */
25081
25831
  model: string;
25082
25832
  /** This setting helps to minimize the number of rate limit errors returned from the Mistral API.
25083
25833
  * By default, the `mistral` service sets the number of requests allowed per minute to 240. */
25084
25834
  rate_limit?: InferenceRateLimitSetting;
25085
25835
  }
25086
25836
  export type InferenceMistralServiceType = 'mistral';
25087
- export type InferenceMistralTaskType = 'text_embedding';
25837
+ export type InferenceMistralTaskType = 'text_embedding' | 'completion' | 'chat_completion';
25088
25838
  export interface InferenceOpenAIServiceSettings {
25089
25839
  /** A valid API key of your OpenAI account.
25090
25840
  * You can find your OpenAI API keys in your OpenAI account under the API keys section.
@@ -25219,20 +25969,23 @@ export type InferenceTaskSettings = any;
25219
25969
  export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion';
25220
25970
  export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding';
25221
25971
  export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion';
25972
+ export type InferenceTaskTypeAmazonSageMaker = 'text_embedding' | 'completion' | 'chat_completion' | 'sparse_embedding' | 'rerank';
25222
25973
  export type InferenceTaskTypeAnthropic = 'completion';
25223
25974
  export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion';
25224
25975
  export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion';
25225
25976
  export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion';
25977
+ export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion';
25978
+ export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion';
25226
25979
  export type InferenceTaskTypeELSER = 'sparse_embedding';
25227
25980
  export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank';
25228
25981
  export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion';
25229
25982
  export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank';
25230
- export type InferenceTaskTypeHuggingFace = 'text_embedding';
25983
+ export type InferenceTaskTypeHuggingFace = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding';
25231
25984
  export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank';
25232
- export type InferenceTaskTypeMistral = 'text_embedding';
25985
+ export type InferenceTaskTypeMistral = 'text_embedding' | 'chat_completion' | 'completion';
25233
25986
  export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion';
25234
25987
  export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank';
25235
- export type InferenceTaskTypeWatsonx = 'text_embedding';
25988
+ export type InferenceTaskTypeWatsonx = 'text_embedding' | 'chat_completion' | 'completion';
25236
25989
  export interface InferenceTextEmbeddingByteResult {
25237
25990
  embedding: InferenceDenseByteVector;
25238
25991
  }
@@ -25308,7 +26061,8 @@ export interface InferenceWatsonxServiceSettings {
25308
26061
  * For the active version data parameters, refer to the Watsonx documentation. */
25309
26062
  api_version: string;
25310
26063
  /** The name of the model to use for the inference task.
25311
- * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */
26064
+ * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models.
26065
+ * Refer to the IBM library - Foundation models in Watsonx.ai. */
25312
26066
  model_id: string;
25313
26067
  /** The identifier of the IBM Cloud project to use for the inference task. */
25314
26068
  project_id: string;
@@ -25319,7 +26073,7 @@ export interface InferenceWatsonxServiceSettings {
25319
26073
  url: string;
25320
26074
  }
25321
26075
  export type InferenceWatsonxServiceType = 'watsonxai';
25322
- export type InferenceWatsonxTaskType = 'text_embedding';
26076
+ export type InferenceWatsonxTaskType = 'text_embedding' | 'chat_completion' | 'completion';
25323
26077
  export interface InferenceChatCompletionUnifiedRequest extends RequestBase {
25324
26078
  /** The inference Id */
25325
26079
  inference_id: Id;
@@ -25442,6 +26196,17 @@ export interface InferenceInferenceRequest extends RequestBase {
25442
26196
  * > info
25443
26197
  * > Inference endpoints for the `completion` task type currently only support a single string as input. */
25444
26198
  input: string | string[];
26199
+ /** Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include:
26200
+ * * `SEARCH`
26201
+ * * `INGEST`
26202
+ * * `CLASSIFICATION`
26203
+ * * `CLUSTERING`
26204
+ * Not all services support all values. Unsupported values will trigger a validation exception.
26205
+ * Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.
26206
+ *
26207
+ * > info
26208
+ * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */
26209
+ input_type?: string;
25445
26210
  /** Task settings for the individual inference request.
25446
26211
  * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
25447
26212
  task_settings?: InferenceTaskSettings;
@@ -25454,6 +26219,7 @@ export interface InferenceInferenceRequest extends RequestBase {
25454
26219
  timeout?: never;
25455
26220
  query?: never;
25456
26221
  input?: never;
26222
+ input_type?: never;
25457
26223
  task_settings?: never;
25458
26224
  });
25459
26225
  /** All values in `querystring` will be added to the request querystring. */
@@ -25465,6 +26231,7 @@ export interface InferenceInferenceRequest extends RequestBase {
25465
26231
  timeout?: never;
25466
26232
  query?: never;
25467
26233
  input?: never;
26234
+ input_type?: never;
25468
26235
  task_settings?: never;
25469
26236
  };
25470
26237
  }
@@ -25581,6 +26348,49 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase {
25581
26348
  };
25582
26349
  }
25583
26350
  export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock;
26351
+ export interface InferencePutAmazonsagemakerRequest extends RequestBase {
26352
+ /** The type of the inference task that the model will perform. */
26353
+ task_type: InferenceTaskTypeAmazonSageMaker;
26354
+ /** The unique identifier of the inference endpoint. */
26355
+ amazonsagemaker_inference_id: Id;
26356
+ /** Specifies the amount of time to wait for the inference endpoint to be created. */
26357
+ timeout?: Duration;
26358
+ /** The chunking configuration object. */
26359
+ chunking_settings?: InferenceInferenceChunkingSettings;
26360
+ /** The type of service supported for the specified task type. In this case, `amazon_sagemaker`. */
26361
+ service: InferenceAmazonSageMakerServiceType;
26362
+ /** Settings used to install the inference model.
26363
+ * These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. */
26364
+ service_settings: InferenceAmazonSageMakerServiceSettings;
26365
+ /** Settings to configure the inference task.
26366
+ * These settings are specific to the task type and `service_settings.api` you specified. */
26367
+ task_settings?: InferenceAmazonSageMakerTaskSettings;
26368
+ /** All values in `body` will be added to the request body. */
26369
+ body?: string | ({
26370
+ [key: string]: any;
26371
+ } & {
26372
+ task_type?: never;
26373
+ amazonsagemaker_inference_id?: never;
26374
+ timeout?: never;
26375
+ chunking_settings?: never;
26376
+ service?: never;
26377
+ service_settings?: never;
26378
+ task_settings?: never;
26379
+ });
26380
+ /** All values in `querystring` will be added to the request querystring. */
26381
+ querystring?: {
26382
+ [key: string]: any;
26383
+ } & {
26384
+ task_type?: never;
26385
+ amazonsagemaker_inference_id?: never;
26386
+ timeout?: never;
26387
+ chunking_settings?: never;
26388
+ service?: never;
26389
+ service_settings?: never;
26390
+ task_settings?: never;
26391
+ };
26392
+ }
26393
+ export type InferencePutAmazonsagemakerResponse = InferenceInferenceEndpointInfoAmazonSageMaker;
25584
26394
  export interface InferencePutAnthropicRequest extends RequestBase {
25585
26395
  /** The task type.
25586
26396
  * The only valid task type for the model to perform is `completion`. */
@@ -25752,6 +26562,83 @@ export interface InferencePutCohereRequest extends RequestBase {
25752
26562
  };
25753
26563
  }
25754
26564
  export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere;
26565
+ export interface InferencePutCustomRequest extends RequestBase {
26566
+ /** The type of the inference task that the model will perform. */
26567
+ task_type: InferenceCustomTaskType;
26568
+ /** The unique identifier of the inference endpoint. */
26569
+ custom_inference_id: Id;
26570
+ /** The chunking configuration object. */
26571
+ chunking_settings?: InferenceInferenceChunkingSettings;
26572
+ /** The type of service supported for the specified task type. In this case, `custom`. */
26573
+ service: InferenceCustomServiceType;
26574
+ /** Settings used to install the inference model.
26575
+ * These settings are specific to the `custom` service. */
26576
+ service_settings: InferenceCustomServiceSettings;
26577
+ /** Settings to configure the inference task.
26578
+ * These settings are specific to the task type you specified. */
26579
+ task_settings?: InferenceCustomTaskSettings;
26580
+ /** All values in `body` will be added to the request body. */
26581
+ body?: string | ({
26582
+ [key: string]: any;
26583
+ } & {
26584
+ task_type?: never;
26585
+ custom_inference_id?: never;
26586
+ chunking_settings?: never;
26587
+ service?: never;
26588
+ service_settings?: never;
26589
+ task_settings?: never;
26590
+ });
26591
+ /** All values in `querystring` will be added to the request querystring. */
26592
+ querystring?: {
26593
+ [key: string]: any;
26594
+ } & {
26595
+ task_type?: never;
26596
+ custom_inference_id?: never;
26597
+ chunking_settings?: never;
26598
+ service?: never;
26599
+ service_settings?: never;
26600
+ task_settings?: never;
26601
+ };
26602
+ }
26603
+ export type InferencePutCustomResponse = InferenceInferenceEndpointInfoCustom;
26604
+ export interface InferencePutDeepseekRequest extends RequestBase {
26605
+ /** The type of the inference task that the model will perform. */
26606
+ task_type: InferenceTaskTypeDeepSeek;
26607
+ /** The unique identifier of the inference endpoint. */
26608
+ deepseek_inference_id: Id;
26609
+ /** Specifies the amount of time to wait for the inference endpoint to be created. */
26610
+ timeout?: Duration;
26611
+ /** The chunking configuration object. */
26612
+ chunking_settings?: InferenceInferenceChunkingSettings;
26613
+ /** The type of service supported for the specified task type. In this case, `deepseek`. */
26614
+ service: InferenceDeepSeekServiceType;
26615
+ /** Settings used to install the inference model.
26616
+ * These settings are specific to the `deepseek` service. */
26617
+ service_settings: InferenceDeepSeekServiceSettings;
26618
+ /** All values in `body` will be added to the request body. */
26619
+ body?: string | ({
26620
+ [key: string]: any;
26621
+ } & {
26622
+ task_type?: never;
26623
+ deepseek_inference_id?: never;
26624
+ timeout?: never;
26625
+ chunking_settings?: never;
26626
+ service?: never;
26627
+ service_settings?: never;
26628
+ });
26629
+ /** All values in `querystring` will be added to the request querystring. */
26630
+ querystring?: {
26631
+ [key: string]: any;
26632
+ } & {
26633
+ task_type?: never;
26634
+ deepseek_inference_id?: never;
26635
+ timeout?: never;
26636
+ chunking_settings?: never;
26637
+ service?: never;
26638
+ service_settings?: never;
26639
+ };
26640
+ }
26641
+ export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek;
25755
26642
  export interface InferencePutElasticsearchRequest extends RequestBase {
25756
26643
  /** The type of the inference task that the model will perform. */
25757
26644
  task_type: InferenceElasticsearchTaskType;
@@ -25924,6 +26811,9 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
25924
26811
  service: InferenceHuggingFaceServiceType;
25925
26812
  /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */
25926
26813
  service_settings: InferenceHuggingFaceServiceSettings;
26814
+ /** Settings to configure the inference task.
26815
+ * These settings are specific to the task type you specified. */
26816
+ task_settings?: InferenceHuggingFaceTaskSettings;
25927
26817
  /** All values in `body` will be added to the request body. */
25928
26818
  body?: string | ({
25929
26819
  [key: string]: any;
@@ -25934,6 +26824,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
25934
26824
  chunking_settings?: never;
25935
26825
  service?: never;
25936
26826
  service_settings?: never;
26827
+ task_settings?: never;
25937
26828
  });
25938
26829
  /** All values in `querystring` will be added to the request querystring. */
25939
26830
  querystring?: {
@@ -25945,6 +26836,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
25945
26836
  chunking_settings?: never;
25946
26837
  service?: never;
25947
26838
  service_settings?: never;
26839
+ task_settings?: never;
25948
26840
  };
25949
26841
  }
25950
26842
  export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace;
@@ -25991,8 +26883,7 @@ export interface InferencePutJinaaiRequest extends RequestBase {
25991
26883
  }
25992
26884
  export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi;
25993
26885
  export interface InferencePutMistralRequest extends RequestBase {
25994
- /** The task type.
25995
- * The only valid task type for the model to perform is `text_embedding`. */
26886
+ /** The type of the inference task that the model will perform. */
25996
26887
  task_type: InferenceMistralTaskType;
25997
26888
  /** The unique identifier of the inference endpoint. */
25998
26889
  mistral_inference_id: Id;
@@ -26114,8 +27005,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
26114
27005
  }
26115
27006
  export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI;
26116
27007
  export interface InferencePutWatsonxRequest extends RequestBase {
26117
- /** The task type.
26118
- * The only valid task type for the model to perform is `text_embedding`. */
27008
+ /** The type of the inference task that the model will perform. */
26119
27009
  task_type: InferenceWatsonxTaskType;
26120
27010
  /** The unique identifier of the inference endpoint. */
26121
27011
  watsonx_inference_id: Id;
@@ -27952,7 +28842,8 @@ export interface MigrationPostFeatureUpgradeRequest extends RequestBase {
27952
28842
  }
27953
28843
  export interface MigrationPostFeatureUpgradeResponse {
27954
28844
  accepted: boolean;
27955
- features: MigrationPostFeatureUpgradeMigrationFeature[];
28845
+ features?: MigrationPostFeatureUpgradeMigrationFeature[];
28846
+ reason?: string;
27956
28847
  }
27957
28848
  export interface MlAdaptiveAllocationsSettings {
27958
28849
  /** If true, adaptive_allocations is enabled */
@@ -35764,7 +36655,7 @@ export interface SecurityBulkError {
35764
36655
  export interface SecurityClusterNode {
35765
36656
  name: Name;
35766
36657
  }
35767
- export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string;
36658
+ export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_esql' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_esql' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string;
35768
36659
  export interface SecurityCreatedStatus {
35769
36660
  created: boolean;
35770
36661
  }
@@ -39370,6 +40261,7 @@ export interface SnapshotSnapshotShardsStatus {
39370
40261
  stats: SnapshotShardsStatsSummary;
39371
40262
  }
39372
40263
  export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count';
40264
+ export type SnapshotSnapshotState = 'IN_PROGRESS' | 'SUCCESS' | 'FAILED' | 'PARTIAL' | 'INCOMPATIBLE';
39373
40265
  export interface SnapshotSnapshotStats {
39374
40266
  /** The number and size of files that still need to be copied as part of the incremental snapshot.
39375
40267
  * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */
@@ -39753,6 +40645,9 @@ export interface SnapshotGetRequest extends RequestBase {
39753
40645
  /** The sort order for the result.
39754
40646
  * The default behavior is sorting by snapshot start time stamp. */
39755
40647
  sort?: SnapshotSnapshotSort;
40648
+ /** Only return snapshots with a state found in the given comma-separated list of snapshot states.
40649
+ * The default is all snapshot states. */
40650
+ state?: SnapshotSnapshotState | SnapshotSnapshotState[];
39756
40651
  /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted.
39757
40652
  *
39758
40653
  * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */
@@ -39775,6 +40670,7 @@ export interface SnapshotGetRequest extends RequestBase {
39775
40670
  size?: never;
39776
40671
  slm_policy_filter?: never;
39777
40672
  sort?: never;
40673
+ state?: never;
39778
40674
  verbose?: never;
39779
40675
  });
39780
40676
  /** All values in `querystring` will be added to the request querystring. */
@@ -39795,6 +40691,7 @@ export interface SnapshotGetRequest extends RequestBase {
39795
40691
  size?: never;
39796
40692
  slm_policy_filter?: never;
39797
40693
  sort?: never;
40694
+ state?: never;
39798
40695
  verbose?: never;
39799
40696
  };
39800
40697
  }
@@ -40676,9 +41573,9 @@ export type SynonymsSynonymString = string;
40676
41573
  export interface SynonymsSynonymsUpdateResult {
40677
41574
  /** The update operation result. */
40678
41575
  result: Result;
40679
- /** Updating synonyms in a synonym set reloads the associated analyzers.
41576
+ /** Updating synonyms in a synonym set can reload the associated analyzers in case refresh is set to true.
40680
41577
  * This information is the analyzers reloading result. */
40681
- reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult;
41578
+ reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult;
40682
41579
  }
40683
41580
  export interface SynonymsDeleteSynonymRequest extends RequestBase {
40684
41581
  /** The synonyms set identifier to delete. */
@@ -40702,12 +41599,17 @@ export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
40702
41599
  set_id: Id;
40703
41600
  /** The ID of the synonym rule to delete. */
40704
41601
  rule_id: Id;
41602
+ /** If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning.
41603
+ * If `false`, analyzers will not be reloaded with the deleted synonym rule
41604
+ * @remarks This property is not supported on Elastic Cloud Serverless. */
41605
+ refresh?: boolean;
40705
41606
  /** All values in `body` will be added to the request body. */
40706
41607
  body?: string | ({
40707
41608
  [key: string]: any;
40708
41609
  } & {
40709
41610
  set_id?: never;
40710
41611
  rule_id?: never;
41612
+ refresh?: never;
40711
41613
  });
40712
41614
  /** All values in `querystring` will be added to the request querystring. */
40713
41615
  querystring?: {
@@ -40715,6 +41617,7 @@ export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
40715
41617
  } & {
40716
41618
  set_id?: never;
40717
41619
  rule_id?: never;
41620
+ refresh?: never;
40718
41621
  };
40719
41622
  }
40720
41623
  export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult;
@@ -40804,6 +41707,10 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem {
40804
41707
  export interface SynonymsPutSynonymRequest extends RequestBase {
40805
41708
  /** The ID of the synonyms set to be created or updated. */
40806
41709
  id: Id;
41710
+ /** If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning.
41711
+ * If `false`, analyzers will not be reloaded with the new synonym set
41712
+ * @remarks This property is not supported on Elastic Cloud Serverless. */
41713
+ refresh?: boolean;
40807
41714
  /** The synonym rules definitions for the synonyms set. */
40808
41715
  synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[];
40809
41716
  /** All values in `body` will be added to the request body. */
@@ -40811,6 +41718,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
40811
41718
  [key: string]: any;
40812
41719
  } & {
40813
41720
  id?: never;
41721
+ refresh?: never;
40814
41722
  synonyms_set?: never;
40815
41723
  });
40816
41724
  /** All values in `querystring` will be added to the request querystring. */
@@ -40818,18 +41726,26 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
40818
41726
  [key: string]: any;
40819
41727
  } & {
40820
41728
  id?: never;
41729
+ refresh?: never;
40821
41730
  synonyms_set?: never;
40822
41731
  };
40823
41732
  }
40824
41733
  export interface SynonymsPutSynonymResponse {
41734
+ /** The update operation result. */
40825
41735
  result: Result;
40826
- reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult;
41736
+ /** Updating a synonyms set can reload the associated analyzers in case refresh is set to true.
41737
+ * This information is the analyzers reloading result. */
41738
+ reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult;
40827
41739
  }
40828
41740
  export interface SynonymsPutSynonymRuleRequest extends RequestBase {
40829
41741
  /** The ID of the synonym set. */
40830
41742
  set_id: Id;
40831
41743
  /** The ID of the synonym rule to be updated or created. */
40832
41744
  rule_id: Id;
41745
+ /** If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning.
41746
+ * If `false`, analyzers will not be reloaded with the new synonym rule
41747
+ * @remarks This property is not supported on Elastic Cloud Serverless. */
41748
+ refresh?: boolean;
40833
41749
  /** The synonym rule information definition, which must be in Solr format. */
40834
41750
  synonyms: SynonymsSynonymString;
40835
41751
  /** All values in `body` will be added to the request body. */
@@ -40838,6 +41754,7 @@ export interface SynonymsPutSynonymRuleRequest extends RequestBase {
40838
41754
  } & {
40839
41755
  set_id?: never;
40840
41756
  rule_id?: never;
41757
+ refresh?: never;
40841
41758
  synonyms?: never;
40842
41759
  });
40843
41760
  /** All values in `querystring` will be added to the request querystring. */
@@ -40846,6 +41763,7 @@ export interface SynonymsPutSynonymRuleRequest extends RequestBase {
40846
41763
  } & {
40847
41764
  set_id?: never;
40848
41765
  rule_id?: never;
41766
+ refresh?: never;
40849
41767
  synonyms?: never;
40850
41768
  };
40851
41769
  }
@@ -43604,7 +44522,7 @@ export interface SpecUtilsCommonQueryParameters {
43604
44522
  filter_path?: string | string[];
43605
44523
  /** When set to `true` will return statistics in a format suitable for humans.
43606
44524
  * For example `"exists_time": "1h"` for humans and
43607
- * `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
44525
+ * `"exists_time_in_millis": 3600000` for computers. When disabled the human
43608
44526
  * readable values will be omitted. This makes sense for responses being consumed
43609
44527
  * only by machines. */
43610
44528
  human?: boolean;
@@ -43612,6 +44530,8 @@ export interface SpecUtilsCommonQueryParameters {
43612
44530
  * this option for debugging only. */
43613
44531
  pretty?: boolean;
43614
44532
  }
44533
+ export interface SpecUtilsOverloadOf<TDefinition = unknown> {
44534
+ }
43615
44535
  export interface SpecUtilsCommonCatQueryParameters {
43616
44536
  /** Specifies the format to return the columnar data in, can be set to
43617
44537
  * `text`, `json`, `cbor`, `yaml`, or `smile`. */
@@ -43622,5 +44542,3 @@ export interface SpecUtilsCommonCatQueryParameters {
43622
44542
  /** When set to `true` will enable verbose output. */
43623
44543
  v?: boolean;
43624
44544
  }
43625
- export interface SpecUtilsOverloadOf<TDefinition = unknown> {
43626
- }