elasticsearch 9.2.0__py3-none-any.whl → 9.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. elasticsearch/_async/client/__init__.py +64 -45
  2. elasticsearch/_async/client/async_search.py +3 -3
  3. elasticsearch/_async/client/autoscaling.py +15 -4
  4. elasticsearch/_async/client/cat.py +40 -2
  5. elasticsearch/_async/client/ccr.py +10 -10
  6. elasticsearch/_async/client/cluster.py +32 -32
  7. elasticsearch/_async/client/connector.py +42 -41
  8. elasticsearch/_async/client/dangling_indices.py +8 -12
  9. elasticsearch/_async/client/enrich.py +10 -10
  10. elasticsearch/_async/client/eql.py +10 -10
  11. elasticsearch/_async/client/esql.py +16 -16
  12. elasticsearch/_async/client/features.py +6 -6
  13. elasticsearch/_async/client/fleet.py +7 -7
  14. elasticsearch/_async/client/graph.py +2 -2
  15. elasticsearch/_async/client/ilm.py +18 -18
  16. elasticsearch/_async/client/indices.py +131 -135
  17. elasticsearch/_async/client/inference.py +76 -56
  18. elasticsearch/_async/client/ingest.py +9 -9
  19. elasticsearch/_async/client/license.py +5 -7
  20. elasticsearch/_async/client/logstash.py +4 -4
  21. elasticsearch/_async/client/migration.py +6 -6
  22. elasticsearch/_async/client/ml.py +125 -85
  23. elasticsearch/_async/client/monitoring.py +4 -3
  24. elasticsearch/_async/client/nodes.py +15 -15
  25. elasticsearch/_async/client/project.py +4 -3
  26. elasticsearch/_async/client/query_rules.py +16 -16
  27. elasticsearch/_async/client/rollup.py +21 -21
  28. elasticsearch/_async/client/search_application.py +19 -19
  29. elasticsearch/_async/client/searchable_snapshots.py +10 -10
  30. elasticsearch/_async/client/security.py +8 -7
  31. elasticsearch/_async/client/shutdown.py +15 -4
  32. elasticsearch/_async/client/simulate.py +4 -4
  33. elasticsearch/_async/client/slm.py +17 -17
  34. elasticsearch/_async/client/snapshot.py +20 -20
  35. elasticsearch/_async/client/sql.py +10 -10
  36. elasticsearch/_async/client/streams.py +6 -6
  37. elasticsearch/_async/client/synonyms.py +10 -10
  38. elasticsearch/_async/client/tasks.py +8 -8
  39. elasticsearch/_async/client/text_structure.py +13 -9
  40. elasticsearch/_async/client/transform.py +51 -12
  41. elasticsearch/_async/client/utils.py +4 -2
  42. elasticsearch/_async/client/watcher.py +26 -26
  43. elasticsearch/_async/client/xpack.py +6 -5
  44. elasticsearch/_sync/client/__init__.py +66 -45
  45. elasticsearch/_sync/client/async_search.py +3 -3
  46. elasticsearch/_sync/client/autoscaling.py +15 -4
  47. elasticsearch/_sync/client/cat.py +40 -2
  48. elasticsearch/_sync/client/ccr.py +10 -10
  49. elasticsearch/_sync/client/cluster.py +32 -32
  50. elasticsearch/_sync/client/connector.py +42 -41
  51. elasticsearch/_sync/client/dangling_indices.py +8 -12
  52. elasticsearch/_sync/client/enrich.py +10 -10
  53. elasticsearch/_sync/client/eql.py +10 -10
  54. elasticsearch/_sync/client/esql.py +16 -16
  55. elasticsearch/_sync/client/features.py +6 -6
  56. elasticsearch/_sync/client/fleet.py +7 -7
  57. elasticsearch/_sync/client/graph.py +2 -2
  58. elasticsearch/_sync/client/ilm.py +18 -18
  59. elasticsearch/_sync/client/indices.py +131 -135
  60. elasticsearch/_sync/client/inference.py +76 -56
  61. elasticsearch/_sync/client/ingest.py +9 -9
  62. elasticsearch/_sync/client/license.py +5 -7
  63. elasticsearch/_sync/client/logstash.py +4 -4
  64. elasticsearch/_sync/client/migration.py +6 -6
  65. elasticsearch/_sync/client/ml.py +125 -85
  66. elasticsearch/_sync/client/monitoring.py +4 -3
  67. elasticsearch/_sync/client/nodes.py +15 -15
  68. elasticsearch/_sync/client/project.py +4 -3
  69. elasticsearch/_sync/client/query_rules.py +16 -16
  70. elasticsearch/_sync/client/rollup.py +21 -21
  71. elasticsearch/_sync/client/search_application.py +19 -19
  72. elasticsearch/_sync/client/searchable_snapshots.py +10 -10
  73. elasticsearch/_sync/client/security.py +8 -7
  74. elasticsearch/_sync/client/shutdown.py +15 -4
  75. elasticsearch/_sync/client/simulate.py +4 -4
  76. elasticsearch/_sync/client/slm.py +17 -17
  77. elasticsearch/_sync/client/snapshot.py +20 -20
  78. elasticsearch/_sync/client/sql.py +10 -10
  79. elasticsearch/_sync/client/streams.py +6 -6
  80. elasticsearch/_sync/client/synonyms.py +10 -10
  81. elasticsearch/_sync/client/tasks.py +8 -8
  82. elasticsearch/_sync/client/text_structure.py +13 -9
  83. elasticsearch/_sync/client/transform.py +51 -12
  84. elasticsearch/_sync/client/utils.py +16 -2
  85. elasticsearch/_sync/client/watcher.py +26 -26
  86. elasticsearch/_sync/client/xpack.py +6 -5
  87. elasticsearch/_version.py +2 -2
  88. elasticsearch/dsl/_async/document.py +4 -5
  89. elasticsearch/dsl/_async/index.py +1 -1
  90. elasticsearch/dsl/_async/search.py +2 -3
  91. elasticsearch/dsl/_sync/document.py +4 -5
  92. elasticsearch/dsl/_sync/index.py +1 -1
  93. elasticsearch/dsl/_sync/search.py +2 -3
  94. elasticsearch/dsl/aggs.py +3 -3
  95. elasticsearch/dsl/async_connections.py +1 -2
  96. elasticsearch/dsl/connections.py +1 -2
  97. elasticsearch/dsl/document_base.py +1 -1
  98. elasticsearch/dsl/field.py +1 -1
  99. elasticsearch/dsl/pydantic.py +1 -1
  100. elasticsearch/dsl/query.py +23 -0
  101. elasticsearch/dsl/serializer.py +1 -2
  102. elasticsearch/dsl/types.py +2 -6
  103. elasticsearch/dsl/utils.py +1 -2
  104. elasticsearch/esql/esql.py +1 -1
  105. elasticsearch/esql/functions.py +2 -2
  106. elasticsearch/helpers/vectorstore/__init__.py +7 -7
  107. elasticsearch/helpers/vectorstore/_async/_utils.py +1 -1
  108. elasticsearch/helpers/vectorstore/_async/embedding_service.py +2 -2
  109. elasticsearch/helpers/vectorstore/_async/strategies.py +3 -3
  110. elasticsearch/helpers/vectorstore/_async/vectorstore.py +5 -5
  111. elasticsearch/helpers/vectorstore/_sync/_utils.py +1 -1
  112. elasticsearch/helpers/vectorstore/_sync/embedding_service.py +2 -2
  113. elasticsearch/helpers/vectorstore/_sync/strategies.py +3 -3
  114. elasticsearch/helpers/vectorstore/_sync/vectorstore.py +5 -5
  115. {elasticsearch-9.2.0.dist-info → elasticsearch-9.2.1.dist-info}/METADATA +1 -1
  116. elasticsearch-9.2.1.dist-info/RECORD +168 -0
  117. {elasticsearch-9.2.0.dist-info → elasticsearch-9.2.1.dist-info}/WHEEL +1 -1
  118. elasticsearch-9.2.0.dist-info/RECORD +0 -168
  119. {elasticsearch-9.2.0.dist-info → elasticsearch-9.2.1.dist-info}/licenses/LICENSE +0 -0
  120. {elasticsearch-9.2.0.dist-info → elasticsearch-9.2.1.dist-info}/licenses/NOTICE +0 -0
@@ -44,14 +44,16 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Perform completion inference on the service</p>
+ <p>Perform completion inference on the service.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_

  :param inference_id: The inference Id
  :param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the <task_type> you specified and override the task
+ settings specified when initializing the service.
  :param timeout: Specifies the amount of time to wait for the inference request
  to complete.
  """
@@ -116,15 +118,17 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Delete an inference endpoint</p>
+ <p>Delete an inference endpoint.</p>
+ <p>This API requires the manage_inference cluster privilege (the built-in <code>inference_admin</code> role grants this privilege).</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete>`_

  :param inference_id: The inference identifier.
  :param task_type: The task type
- :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
- which reference this endpoint is returned.
+ :param dry_run: When true, checks the semantic_text fields and inference processors
+ that reference the endpoint and returns them in a list, but does not delete
+ the endpoint.
  :param force: When true, the inference endpoint is forcefully deleted even if
  it is still being used by ingest processors or semantic text fields.
  """
@@ -190,7 +194,8 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Get an inference endpoint</p>
+ <p>Get an inference endpoint.</p>
+ <p>This API requires the <code>monitor_inference</code> cluster privilege (the built-in <code>inference_admin</code> and <code>inference_user</code> roles grant this privilege).</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get>`_
@@ -544,7 +549,7 @@ class InferenceClient(NamespacedClient):
  self,
  *,
  task_type: t.Union[
- str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"]
+ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
  ],
  alibabacloud_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
@@ -573,7 +578,9 @@ class InferenceClient(NamespacedClient):
  this case, `alibabacloud-ai-search`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `alibabacloud-ai-search` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -669,7 +676,8 @@ class InferenceClient(NamespacedClient):
  this case, `amazonbedrock`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `amazonbedrock` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -771,7 +779,9 @@ class InferenceClient(NamespacedClient):
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `amazon_sagemaker` service and `service_settings.api`
  you specified.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank`, `completion`, or `chat_completion` task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type and `service_settings.api` you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -825,12 +835,7 @@ class InferenceClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
  )
  def put_anthropic(
  self,
@@ -839,7 +844,6 @@ class InferenceClient(NamespacedClient):
  anthropic_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
  service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
@@ -863,8 +867,7 @@ class InferenceClient(NamespacedClient):
  :param service: The type of service supported for the specified task type. In
  this case, `anthropic`.
  :param service_settings: Settings used to install the inference model. These
- settings are specific to the `watsonxai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `anthropic` service.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -902,8 +905,6 @@ class InferenceClient(NamespacedClient):
  __body["service"] = service
  if service_settings is not None:
  __body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
  if task_settings is not None:
  __body["task_settings"] = task_settings
  __headers = {"accept": "application/json", "content-type": "application/json"}
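The hunks above remove `chunking_settings` from `put_anthropic` (the same removal appears further down for `put_contextualai` and `put_deepseek`) and correct the stray `watsonxai` reference in its docstring to `anthropic`. A hedged sketch of a 9.2.1 call with the same `client`; the endpoint ID, credentials, and settings keys are placeholders:

    client.inference.put_anthropic(
        task_type="completion",
        anthropic_inference_id="my-anthropic-endpoint",
        service="anthropic",
        service_settings={
            "api_key": "<redacted>",        # placeholder credential
            "model_id": "claude-sonnet",    # placeholder model name
        },
        task_settings={"max_tokens": 1024},  # service-specific, assumed key
        # chunking_settings is no longer accepted by put_anthropic in 9.2.1.
    )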
@@ -955,8 +956,10 @@ class InferenceClient(NamespacedClient):
  :param service: The type of service supported for the specified task type. In
  this case, `azureaistudio`.
  :param service_settings: Settings used to install the inference model. These
- settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `azureaistudio` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+ task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1056,7 +1059,8 @@ class InferenceClient(NamespacedClient):
  this case, `azureopenai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `azureopenai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1148,7 +1152,9 @@ class InferenceClient(NamespacedClient):
  this case, `cohere`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `cohere` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+ task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1200,12 +1206,7 @@ class InferenceClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
  )
  def put_contextualai(
  self,
@@ -1214,7 +1215,6 @@ class InferenceClient(NamespacedClient):
  contextualai_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["contextualai"]]] = None,
  service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
@@ -1239,7 +1239,6 @@ class InferenceClient(NamespacedClient):
  this case, `contextualai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `contextualai` service.
- :param chunking_settings: The chunking configuration object.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1277,8 +1276,6 @@ class InferenceClient(NamespacedClient):
  __body["service"] = service
  if service_settings is not None:
  __body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
  if task_settings is not None:
  __body["task_settings"] = task_settings
  __headers = {"accept": "application/json", "content-type": "application/json"}
@@ -1372,7 +1369,9 @@ class InferenceClient(NamespacedClient):
  this case, `custom`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `custom` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  """
@@ -1420,7 +1419,7 @@ class InferenceClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=("service", "service_settings", "chunking_settings"),
+ body_fields=("service", "service_settings"),
  )
  def put_deepseek(
  self,
@@ -1429,7 +1428,6 @@ class InferenceClient(NamespacedClient):
  deepseek_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None,
  service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
@@ -1452,7 +1450,6 @@ class InferenceClient(NamespacedClient):
  this case, `deepseek`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `deepseek` service.
- :param chunking_settings: The chunking configuration object.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
  to be created.
  """
@@ -1486,8 +1483,6 @@ class InferenceClient(NamespacedClient):
  __body["service"] = service
  if service_settings is not None:
  __body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
  __headers = {"accept": "application/json", "content-type": "application/json"}
  return self.perform_request( # type: ignore[return-value]
  "PUT",
@@ -1554,7 +1549,9 @@ class InferenceClient(NamespacedClient):
  this case, `elasticsearch`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `elasticsearch` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` and `text_embedding` task types. Not applicable to
+ the `rerank` task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1735,7 +1732,8 @@ class InferenceClient(NamespacedClient):
  this case, `googleaistudio`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `googleaistudio` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
  to be created.
  """
@@ -1825,7 +1823,9 @@ class InferenceClient(NamespacedClient):
  this case, `googlevertexai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `googlevertexai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1953,7 +1953,9 @@ class InferenceClient(NamespacedClient):
  this case, `hugging_face`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `hugging_face` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2047,7 +2049,8 @@ class InferenceClient(NamespacedClient):
  this case, `jinaai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `jinaai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2133,7 +2136,9 @@ class InferenceClient(NamespacedClient):
  this case, `llama`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `llama` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
  to be created.
  """
@@ -2215,7 +2220,9 @@ class InferenceClient(NamespacedClient):
  this case, `mistral`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `mistral` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
  to be created.
  """
@@ -2305,7 +2312,9 @@ class InferenceClient(NamespacedClient):
  this case, `openai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2396,7 +2405,8 @@ class InferenceClient(NamespacedClient):
  this case, `voyageai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `voyageai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2448,7 +2458,7 @@ class InferenceClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=("service", "service_settings"),
+ body_fields=("service", "service_settings", "chunking_settings"),
  )
  def put_watsonx(
  self,
@@ -2459,6 +2469,7 @@ class InferenceClient(NamespacedClient):
  watsonx_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
  service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
@@ -2483,6 +2494,9 @@ class InferenceClient(NamespacedClient):
  this case, `watsonxai`.
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `watsonxai` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
  to be created.
  """
@@ -2516,6 +2530,8 @@ class InferenceClient(NamespacedClient):
  __body["service"] = service
  if service_settings is not None:
  __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
  __headers = {"accept": "application/json", "content-type": "application/json"}
  return self.perform_request( # type: ignore[return-value]
  "PUT",
@@ -2547,7 +2563,7 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Perform reranking inference on the service</p>
+ <p>Perform reranking inference on the service.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
@@ -2619,14 +2635,16 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Perform sparse embedding inference on the service</p>
+ <p>Perform sparse embedding inference on the service.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_

  :param inference_id: The inference Id
  :param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the <task_type> you specified and override the task
+ settings specified when initializing the service.
  :param timeout: Specifies the amount of time to wait for the inference request
  to complete.
  """
@@ -2684,7 +2702,7 @@ class InferenceClient(NamespacedClient):
  """
  .. raw:: html

- <p>Perform text embedding inference on the service</p>
+ <p>Perform text embedding inference on the service.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
@@ -2698,7 +2716,9 @@ class InferenceClient(NamespacedClient):
  to the relevant service-specific documentation for more info. > info > The
  `input_type` parameter specified on the root level of the request body will
  take precedence over the `input_type` parameter specified in `task_settings`.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the <task_type> you specified and override the task
+ settings specified when initializing the service.
  :param timeout: Specifies the amount of time to wait for the inference request
  to complete.
  """
@@ -151,8 +151,8 @@ class IngestClient(NamespacedClient):
  """
  .. raw:: html

- <p>Delete pipelines.
- Delete one or more ingest pipelines.</p>
+ <p>Delete pipelines.</p>
+ <p>Delete one or more ingest pipelines.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline>`_
@@ -204,8 +204,8 @@ class IngestClient(NamespacedClient):
  """
  .. raw:: html

- <p>Get GeoIP statistics.
- Get download statistics for GeoIP2 databases that are used with the GeoIP processor.</p>
+ <p>Get GeoIP statistics.</p>
+ <p>Get download statistics for GeoIP2 databases that are used with the GeoIP processor.</p>


  `<https://www.elastic.co/docs/reference/enrich-processor/geoip-processor>`_
@@ -355,7 +355,7 @@ class IngestClient(NamespacedClient):
  :param master_timeout: Period to wait for a connection to the master node. If
  no response is received before the timeout expires, the request fails and
  returns an error.
- :param summary: Return pipelines without their definitions (default: false)
+ :param summary: Return pipelines without their definitions
  """
  __path_parts: t.Dict[str, str]
  if id not in SKIP_IN_PATH:
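Dropping the `(default: false)` note from `summary` above is a docstring-only cleanup: when set, the get pipeline API still returns pipelines without their definitions. A sketch with the same `client` (the pipeline ID is a placeholder):

    # List pipelines without their processor definitions.
    names_only = client.ingest.get_pipeline(summary=True)

    # Fetch one pipeline in full.
    full = client.ingest.get_pipeline(id="my-logs-pipeline")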
@@ -399,8 +399,8 @@ class IngestClient(NamespacedClient):
  """
  .. raw:: html

- <p>Run a grok processor.
- Extract structured fields out of a single text field within a document.
+ <p>Run a grok processor.</p>
+ <p>Extract structured fields out of a single text field within a document.
  You must choose which field to extract matched fields from, as well as the grok pattern you expect will match.
  A grok pattern is like a regular expression that supports aliased expressions that can be reused.</p>

@@ -613,8 +613,8 @@ class IngestClient(NamespacedClient):
  """
  .. raw:: html

- <p>Create or update a pipeline.
- Changes made using this API take effect immediately.</p>
+ <p>Create or update a pipeline.</p>
+ <p>Changes made using this API take effect immediately.</p>


  `<https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines>`_
@@ -312,8 +312,7 @@ class LicenseClient(NamespacedClient):

  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic>`_

- :param acknowledge: whether the user has acknowledged acknowledge messages (default:
- false)
+ :param acknowledge: Whether the user has acknowledged acknowledge messages
  :param master_timeout: Period to wait for a connection to the master node.
  :param timeout: Period to wait for a response. If no response is received before
  the timeout expires, the request fails and returns an error.
@@ -360,8 +359,8 @@ class LicenseClient(NamespacedClient):
  """
  .. raw:: html

- <p>Start a trial.
- Start a 30-day trial, which gives access to all subscription features.</p>
+ <p>Start a trial.</p>
+ <p>Start a 30-day trial, which gives access to all subscription features.</p>
  <p>NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.
  For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at <a href="https://www.elastic.co/trialextension">https://www.elastic.co/trialextension</a>.</p>
  <p>To check the status of your trial, use the get trial status API.</p>
@@ -369,10 +368,9 @@ class LicenseClient(NamespacedClient):

  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial>`_

- :param acknowledge: whether the user has acknowledged acknowledge messages (default:
- false)
+ :param acknowledge: Whether the user has acknowledged acknowledge messages
  :param master_timeout: Period to wait for a connection to the master node.
- :param type: The type of trial license to generate (default: "trial")
+ :param type: The type of trial license to generate
  """
  __path_parts: t.Dict[str, str] = {}
  __path = "/_license/start_trial"
@@ -38,8 +38,8 @@ class LogstashClient(NamespacedClient):
  """
  .. raw:: html

- <p>Delete a Logstash pipeline.
- Delete a pipeline that is used for Logstash Central Management.
+ <p>Delete a Logstash pipeline.</p>
+ <p>Delete a pipeline that is used for Logstash Central Management.
  If the request succeeds, you receive an empty response with an appropriate status code.</p>


@@ -83,8 +83,8 @@ class LogstashClient(NamespacedClient):
  """
  .. raw:: html

- <p>Get Logstash pipelines.
- Get pipelines that are used for Logstash Central Management.</p>
+ <p>Get Logstash pipelines.</p>
+ <p>Get pipelines that are used for Logstash Central Management.</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline>`_
@@ -38,8 +38,8 @@ class MigrationClient(NamespacedClient):
  """
  .. raw:: html

- <p>Get deprecation information.
- Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.</p>
+ <p>Get deprecation information.</p>
+ <p>Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.</p>
  <p>TIP: This APIs is designed for indirect use by the Upgrade Assistant.
  You are strongly recommended to use the Upgrade Assistant.</p>

@@ -87,8 +87,8 @@ class MigrationClient(NamespacedClient):
  """
  .. raw:: html

- <p>Get feature migration information.
- Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+ <p>Get feature migration information.</p>
+ <p>Version upgrades sometimes require changes to how features store configuration information and data in system indices.
  Check which features need to be migrated and the status of any migrations that are in progress.</p>
  <p>TIP: This API is designed for indirect use by the Upgrade Assistant.
  You are strongly recommended to use the Upgrade Assistant.</p>
@@ -129,8 +129,8 @@ class MigrationClient(NamespacedClient):
  """
  .. raw:: html

- <p>Start the feature migration.
- Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+ <p>Start the feature migration.</p>
+ <p>Version upgrades sometimes require changes to how features store configuration information and data in system indices.
  This API starts the automatic migration process.</p>
  <p>Some functionality might be temporarily unavailable during the migration process.</p>
  <p>TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.</p>