elasticsearch 9.2.1__py3-none-any.whl → 9.3.0__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (55)
  1. elasticsearch/_async/client/__init__.py +44 -40
  2. elasticsearch/_async/client/async_search.py +4 -3
  3. elasticsearch/_async/client/cat.py +163 -8
  4. elasticsearch/_async/client/cluster.py +66 -34
  5. elasticsearch/_async/client/eql.py +7 -6
  6. elasticsearch/_async/client/esql.py +157 -8
  7. elasticsearch/_async/client/fleet.py +1 -1
  8. elasticsearch/_async/client/graph.py +1 -1
  9. elasticsearch/_async/client/indices.py +436 -17
  10. elasticsearch/_async/client/inference.py +299 -9
  11. elasticsearch/_async/client/ml.py +7 -3
  12. elasticsearch/_async/client/nodes.py +167 -5
  13. elasticsearch/_async/client/project.py +9 -1
  14. elasticsearch/_async/client/security.py +26 -3
  15. elasticsearch/_async/client/snapshot.py +1 -1
  16. elasticsearch/_async/client/sql.py +7 -6
  17. elasticsearch/_async/client/streams.py +0 -1
  18. elasticsearch/_async/client/text_structure.py +3 -3
  19. elasticsearch/_sync/client/__init__.py +44 -40
  20. elasticsearch/_sync/client/async_search.py +4 -3
  21. elasticsearch/_sync/client/cat.py +163 -8
  22. elasticsearch/_sync/client/cluster.py +66 -34
  23. elasticsearch/_sync/client/eql.py +7 -6
  24. elasticsearch/_sync/client/esql.py +157 -8
  25. elasticsearch/_sync/client/fleet.py +1 -1
  26. elasticsearch/_sync/client/graph.py +1 -1
  27. elasticsearch/_sync/client/indices.py +436 -17
  28. elasticsearch/_sync/client/inference.py +299 -9
  29. elasticsearch/_sync/client/ml.py +7 -3
  30. elasticsearch/_sync/client/nodes.py +167 -5
  31. elasticsearch/_sync/client/project.py +9 -1
  32. elasticsearch/_sync/client/project_routing.py +264 -0
  33. elasticsearch/_sync/client/security.py +26 -3
  34. elasticsearch/_sync/client/snapshot.py +1 -1
  35. elasticsearch/_sync/client/sql.py +7 -6
  36. elasticsearch/_sync/client/streams.py +0 -1
  37. elasticsearch/_sync/client/text_structure.py +3 -3
  38. elasticsearch/_version.py +2 -2
  39. elasticsearch/dsl/__init__.py +4 -0
  40. elasticsearch/dsl/aggs.py +6 -6
  41. elasticsearch/dsl/field.py +91 -7
  42. elasticsearch/dsl/query.py +2 -2
  43. elasticsearch/dsl/response/__init__.py +2 -0
  44. elasticsearch/dsl/types.py +66 -7
  45. elasticsearch/dsl/utils.py +11 -2
  46. elasticsearch/esql/functions.py +924 -250
  47. elasticsearch/helpers/__init__.py +2 -0
  48. elasticsearch/helpers/actions.py +21 -0
  49. elasticsearch/helpers/vectorstore/_async/vectorstore.py +3 -0
  50. elasticsearch/helpers/vectorstore/_sync/vectorstore.py +3 -0
  51. {elasticsearch-9.2.1.dist-info → elasticsearch-9.3.0.dist-info}/METADATA +2 -1
  52. {elasticsearch-9.2.1.dist-info → elasticsearch-9.3.0.dist-info}/RECORD +55 -54
  53. {elasticsearch-9.2.1.dist-info → elasticsearch-9.3.0.dist-info}/WHEEL +0 -0
  54. {elasticsearch-9.2.1.dist-info → elasticsearch-9.3.0.dist-info}/licenses/LICENSE +0 -0
  55. {elasticsearch-9.2.1.dist-info → elasticsearch-9.3.0.dist-info}/licenses/NOTICE +0 -0
@@ -45,6 +45,10 @@ class InferenceClient(NamespacedClient):
  .. raw:: html

  <p>Perform completion inference on the service.</p>
+ <p>Get responses for completion tasks.
+ This API works only with the completion task type.</p>
+ <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+ <p>This API requires the <code>monitor_inference</code> cluster privilege (the built-in <code>inference_admin</code> and <code>inference_user</code> roles grant this privilege).</p>


  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
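The docstring now spells out that completion is a standalone task type gated by the `monitor_inference` cluster privilege. A minimal sketch of a completion call with the synchronous client (the endpoint ID is illustrative, not something this diff defines):

    from elasticsearch import Elasticsearch

    # Assumes an authenticated user holding the monitor_inference privilege.
    client = Elasticsearch("http://localhost:9200")

    # Run a completion task against a previously created inference endpoint.
    resp = client.inference.completion(
        inference_id="my-completion-endpoint",  # hypothetical endpoint ID
        input="Summarize the 9.3.0 release in one sentence.",
    )
    print(resp)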
@@ -391,19 +395,22 @@ class InferenceClient(NamespacedClient):
  <li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
  <li>Amazon SageMaker (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
  <li>Anthropic (<code>completion</code>)</li>
- <li>Azure AI Studio (<code>completion</code>, 'rerank', <code>text_embedding</code>)</li>
- <li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Azure AI Studio (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Azure OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
  <li>Cohere (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
  <li>DeepSeek (<code>chat_completion</code>, <code>completion</code>)</li>
  <li>Elasticsearch (<code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code> - this service is for built-in models and models uploaded through Eland)</li>
  <li>ELSER (<code>sparse_embedding</code>)</li>
  <li>Google AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
  <li>Google Vertex AI (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Groq (<code>chat_completion</code>)</li>
  <li>Hugging Face (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
  <li>JinaAI (<code>rerank</code>, <code>text_embedding</code>)</li>
  <li>Llama (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
  <li>Mistral (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Nvidia (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>, <code>rerank</code>)</li>
  <li>OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+ <li>OpenShift AI (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
  <li>VoyageAI (<code>rerank</code>, <code>text_embedding</code>)</li>
  <li>Watsonx inference integration (<code>text_embedding</code>)</li>
  </ul>
@@ -1023,7 +1030,9 @@ class InferenceClient(NamespacedClient):
  async def put_azureopenai(
  self,
  *,
- task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+ task_type: t.Union[
+ str, t.Literal["chat_completion", "completion", "text_embedding"]
+ ],
  azureopenai_inference_id: str,
  service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None,
  service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -1060,7 +1069,8 @@ class InferenceClient(NamespacedClient):
  :param service_settings: Settings used to install the inference model. These
  settings are specific to the `azureopenai` service.
  :param chunking_settings: The chunking configuration object. Applies only to
- the `text_embedding` task type. Not applicable to the `completion` task type.
+ the `text_embedding` task type. Not applicable to the `completion` and `chat_completion`
+ task types.
  :param task_settings: Settings to configure the inference task. These settings
  are specific to the task type you specified.
  :param timeout: Specifies the amount of time to wait for the inference endpoint
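With `chat_completion` added to the accepted literals, creating an Azure OpenAI chat endpoint looks like the sketch below (synchronous client; the endpoint ID and service-settings values are placeholders to adapt to your deployment):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    client.inference.put_azureopenai(
        task_type="chat_completion",            # newly accepted task type
        azureopenai_inference_id="azure-chat",  # hypothetical endpoint ID
        service="azureopenai",
        service_settings={                      # placeholder credentials
            "api_key": "<azure-api-key>",
            "resource_name": "<resource>",
            "deployment_id": "<deployment>",
            "api_version": "2024-06-01",
        },
    )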
@@ -1878,6 +1888,82 @@ class InferenceClient(NamespacedClient):
  path_parts=__path_parts,
  )

+ @_rewrite_parameters(
+ body_fields=("service", "service_settings"),
+ )
+ async def put_groq(
+ self,
+ *,
+ task_type: t.Union[str, t.Literal["chat_completion"]],
+ groq_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["groq"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ <p>Create a Groq inference endpoint.</p>
+ <p>Create an inference endpoint to perform an inference task with the <code>groq</code> service.</p>
+
+
+ `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-groq>`_
+
+ :param task_type: The type of the inference task that the model will perform.
+ :param groq_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `groq`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `groq` service.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if groq_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'groq_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "groq_inference_id": _quote(groq_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["groq_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ __headers = {"accept": "application/json", "content-type": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_groq",
+ path_parts=__path_parts,
+ )
+
  @_rewrite_parameters(
  body_fields=(
  "service",
@@ -2269,6 +2355,104 @@ class InferenceClient(NamespacedClient):
  path_parts=__path_parts,
  )

+ @_rewrite_parameters(
+ body_fields=(
+ "service",
+ "service_settings",
+ "chunking_settings",
+ "task_settings",
+ ),
+ )
+ async def put_nvidia(
+ self,
+ *,
+ task_type: t.Union[
+ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+ ],
+ nvidia_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["nvidia"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ <p>Create an Nvidia inference endpoint.</p>
+ <p>Create an inference endpoint to perform an inference task with the <code>nvidia</code> service.</p>
+
+
+ `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-nvidia>`_
+
+ :param task_type: The type of the inference task that the model will perform.
+ NOTE: The `chat_completion` task type only supports streaming and only through
+ the _stream API.
+ :param nvidia_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `nvidia`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `nvidia` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
+ :param task_settings: Settings to configure the inference task. Applies only
+ to the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types. These settings are specific to the task
+ type you specified.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if nvidia_inference_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'nvidia_inference_id'")
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "nvidia_inference_id": _quote(nvidia_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["nvidia_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ __headers = {"accept": "application/json", "content-type": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_nvidia",
+ path_parts=__path_parts,
+ )
+
  @_rewrite_parameters(
  body_fields=(
  "service",
@@ -2365,6 +2549,106 @@ class InferenceClient(NamespacedClient):
  path_parts=__path_parts,
  )

+ @_rewrite_parameters(
+ body_fields=(
+ "service",
+ "service_settings",
+ "chunking_settings",
+ "task_settings",
+ ),
+ )
+ async def put_openshift_ai(
+ self,
+ *,
+ task_type: t.Union[
+ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+ ],
+ openshiftai_inference_id: str,
+ service: t.Optional[t.Union[str, t.Literal["openshift_ai"]]] = None,
+ service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ pretty: t.Optional[bool] = None,
+ task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ <p>Create an OpenShift AI inference endpoint.</p>
+ <p>Create an inference endpoint to perform an inference task with the <code>openshift_ai</code> service.</p>
+
+
+ `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openshift-ai>`_
+
+ :param task_type: The type of the inference task that the model will perform.
+ NOTE: The `chat_completion` task type only supports streaming and only through
+ the _stream API.
+ :param openshiftai_inference_id: The unique identifier of the inference endpoint.
+ :param service: The type of service supported for the specified task type. In
+ this case, `openshift_ai`.
+ :param service_settings: Settings used to install the inference model. These
+ settings are specific to the `openshift_ai` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
+ :param task_settings: Settings to configure the inference task. Applies only
+ to the `rerank` task type. Not applicable to the `text_embedding`, `completion`,
+ or `chat_completion` task types. These settings are specific to the task
+ type you specified.
+ :param timeout: Specifies the amount of time to wait for the inference endpoint
+ to be created.
+ """
+ if task_type in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'task_type'")
+ if openshiftai_inference_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for parameter 'openshiftai_inference_id'"
+ )
+ if service is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service'")
+ if service_settings is None and body is None:
+ raise ValueError("Empty value passed for parameter 'service_settings'")
+ __path_parts: t.Dict[str, str] = {
+ "task_type": _quote(task_type),
+ "openshiftai_inference_id": _quote(openshiftai_inference_id),
+ }
+ __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openshiftai_inference_id"]}'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if service is not None:
+ __body["service"] = service
+ if service_settings is not None:
+ __body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
+ if task_settings is not None:
+ __body["task_settings"] = task_settings
+ __headers = {"accept": "application/json", "content-type": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="inference.put_openshift_ai",
+ path_parts=__path_parts,
+ )
+
  @_rewrite_parameters(
  body_fields=(
  "service",
@@ -2544,20 +2828,22 @@ class InferenceClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=("input", "query", "task_settings"),
+ body_fields=("input", "query", "return_documents", "task_settings", "top_n"),
  )
  async def rerank(
  self,
  *,
  inference_id: str,
- input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ input: t.Optional[t.Sequence[str]] = None,
  query: t.Optional[str] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
  pretty: t.Optional[bool] = None,
+ return_documents: t.Optional[bool] = None,
  task_settings: t.Optional[t.Any] = None,
  timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ top_n: t.Optional[int] = None,
  body: t.Optional[t.Dict[str, t.Any]] = None,
  ) -> ObjectApiResponse[t.Any]:
  """
@@ -2569,14 +2855,14 @@ class InferenceClient(NamespacedClient):
  `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_

  :param inference_id: The unique identifier for the inference endpoint.
- :param input: The text on which you want to perform the inference task. It can
- be a single string or an array. > info > Inference endpoints for the `completion`
- task type currently only support a single string as input.
+ :param input: The documents to rank.
  :param query: Query input.
+ :param return_documents: Include the document text in the response.
  :param task_settings: Task settings for the individual inference request. These
  settings are specific to the task type you specified and override the task
  settings specified when initializing the service.
  :param timeout: The amount of time to wait for the inference request to complete.
+ :param top_n: Limit the response to the top N documents.
  """
  if inference_id in SKIP_IN_PATH:
  raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -2603,8 +2889,12 @@ class InferenceClient(NamespacedClient):
  __body["input"] = input
  if query is not None:
  __body["query"] = query
+ if return_documents is not None:
+ __body["return_documents"] = return_documents
  if task_settings is not None:
  __body["task_settings"] = task_settings
+ if top_n is not None:
+ __body["top_n"] = top_n
  __headers = {"accept": "application/json", "content-type": "application/json"}
  return await self.perform_request( # type: ignore[return-value]
  "POST",
@@ -1506,7 +1506,7 @@ class MlClient(NamespacedClient):
  self,
  *,
  job_id: str,
- category_id: t.Optional[str] = None,
+ category_id: t.Optional[int] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  from_: t.Optional[int] = None,
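For example (the job ID is illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # category_id was previously annotated as str; it now matches the
    # numeric category IDs the ML APIs actually return.
    resp = client.ml.get_categories(
        job_id="my-anomaly-job",  # hypothetical job ID
        category_id=1,
    )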
@@ -4861,13 +4861,14 @@ class MlClient(NamespacedClient):
  )

  @_rewrite_parameters(
- body_fields=("allow_no_match", "force", "timeout"),
+ body_fields=("allow_no_match", "close_job", "force", "timeout"),
  )
  async def stop_datafeed(
  self,
  *,
  datafeed_id: str,
  allow_no_match: t.Optional[bool] = None,
+ close_job: t.Optional[bool] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  force: t.Optional[bool] = None,
@@ -4892,6 +4893,7 @@ class MlClient(NamespacedClient):
  `*` as the identifier.
  :param allow_no_match: Refer to the description for the `allow_no_match` query
  parameter.
+ :param close_job: Refer to the description for the `close_job` query parameter.
  :param force: Refer to the description for the `force` query parameter.
  :param timeout: Refer to the description for the `timeout` query parameter.
  """
@@ -4912,6 +4914,8 @@ class MlClient(NamespacedClient):
  if not __body:
  if allow_no_match is not None:
  __body["allow_no_match"] = allow_no_match
+ if close_job is not None:
+ __body["close_job"] = close_job
  if force is not None:
  __body["force"] = force
  if timeout is not None:
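With `close_job` plumbed through to the request body, a datafeed can be stopped and its anomaly detection job closed in one call (IDs illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    client.ml.stop_datafeed(
        datafeed_id="datafeed-my-anomaly-job",  # hypothetical datafeed ID
        close_job=True,  # new: also close the associated job once the datafeed stops
        timeout="30s",
    )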
@@ -5755,7 +5759,7 @@ class MlClient(NamespacedClient):
  <p>Validate an anomaly detection job.</p>


- `<https://www.elastic.co/guide/en/machine-learning/9.2/ml-jobs.html>`_
+ `<https://www.elastic.co/guide/en/machine-learning/9.3/ml-jobs.html>`_

  :param analysis_config:
  :param analysis_limits:
@@ -220,7 +220,50 @@ class NodesClient(NamespacedClient):
  self,
  *,
  node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  flat_settings: t.Optional[bool] = None,
@@ -357,8 +400,120 @@ class NodesClient(NamespacedClient):
  self,
  *,
  node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- index_metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
+ index_metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ],
+ ]
+ ] = None,
  completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  error_trace: t.Optional[bool] = None,
  fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
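Since both single values and sequences are accepted, calls like the following now type-check against the literal metric names (a sketch, not an exhaustive tour of the options):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Node-level JVM and thread-pool stats, plus per-index search/indexing
    # stats (index_metric only applies when "indices" is requested).
    resp = client.nodes.stats(
        metric=["jvm", "thread_pool", "indices"],
        index_metric=["search", "indexing"],
    )
    print(resp["nodes"])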
@@ -483,7 +638,14 @@ class NodesClient(NamespacedClient):
  self,
  *,
  node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]]
+ ],
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]],
+ ]
+ ] = None,
  error_trace: t.Optional[bool] = None,
  filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
  human: t.Optional[bool] = None,
@@ -502,7 +664,7 @@ class NodesClient(NamespacedClient):
  information. Use `_local` to return information from the node you're connecting
  to, leave empty to get information from all nodes.
  :param metric: Limits the information returned to the specific metrics. A comma-separated
- list of the following options: `_all`, `rest_actions`.
+ list of the following options: `_all`, `rest_actions`, `aggregations`.
  :param timeout: Period to wait for a response. If no response is received before
  the timeout expires, the request fails and returns an error.
  """