elasticsearch 8.17.2__py3-none-any.whl → 8.18.0__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (135)
  1. elasticsearch/_async/client/__init__.py +174 -79
  2. elasticsearch/_async/client/_base.py +0 -1
  3. elasticsearch/_async/client/async_search.py +12 -8
  4. elasticsearch/_async/client/autoscaling.py +4 -4
  5. elasticsearch/_async/client/cat.py +26 -26
  6. elasticsearch/_async/client/ccr.py +186 -72
  7. elasticsearch/_async/client/cluster.py +38 -19
  8. elasticsearch/_async/client/connector.py +30 -30
  9. elasticsearch/_async/client/dangling_indices.py +3 -3
  10. elasticsearch/_async/client/enrich.py +26 -5
  11. elasticsearch/_async/client/eql.py +32 -4
  12. elasticsearch/_async/client/esql.py +62 -6
  13. elasticsearch/_async/client/features.py +12 -2
  14. elasticsearch/_async/client/fleet.py +8 -2
  15. elasticsearch/_async/client/graph.py +1 -1
  16. elasticsearch/_async/client/ilm.py +23 -22
  17. elasticsearch/_async/client/indices.py +424 -132
  18. elasticsearch/_async/client/inference.py +1853 -115
  19. elasticsearch/_async/client/ingest.py +32 -38
  20. elasticsearch/_async/client/license.py +51 -16
  21. elasticsearch/_async/client/logstash.py +3 -3
  22. elasticsearch/_async/client/migration.py +3 -3
  23. elasticsearch/_async/client/ml.py +141 -112
  24. elasticsearch/_async/client/monitoring.py +1 -1
  25. elasticsearch/_async/client/nodes.py +9 -27
  26. elasticsearch/_async/client/query_rules.py +8 -8
  27. elasticsearch/_async/client/rollup.py +8 -8
  28. elasticsearch/_async/client/search_application.py +13 -13
  29. elasticsearch/_async/client/searchable_snapshots.py +4 -4
  30. elasticsearch/_async/client/security.py +71 -71
  31. elasticsearch/_async/client/shutdown.py +3 -10
  32. elasticsearch/_async/client/simulate.py +6 -6
  33. elasticsearch/_async/client/slm.py +9 -9
  34. elasticsearch/_async/client/snapshot.py +13 -17
  35. elasticsearch/_async/client/sql.py +6 -6
  36. elasticsearch/_async/client/ssl.py +1 -1
  37. elasticsearch/_async/client/synonyms.py +7 -7
  38. elasticsearch/_async/client/tasks.py +3 -9
  39. elasticsearch/_async/client/text_structure.py +4 -4
  40. elasticsearch/_async/client/transform.py +30 -28
  41. elasticsearch/_async/client/watcher.py +22 -14
  42. elasticsearch/_async/client/xpack.py +2 -2
  43. elasticsearch/_async/helpers.py +0 -1
  44. elasticsearch/_sync/client/__init__.py +174 -79
  45. elasticsearch/_sync/client/_base.py +0 -1
  46. elasticsearch/_sync/client/async_search.py +12 -8
  47. elasticsearch/_sync/client/autoscaling.py +4 -4
  48. elasticsearch/_sync/client/cat.py +26 -26
  49. elasticsearch/_sync/client/ccr.py +186 -72
  50. elasticsearch/_sync/client/cluster.py +38 -19
  51. elasticsearch/_sync/client/connector.py +30 -30
  52. elasticsearch/_sync/client/dangling_indices.py +3 -3
  53. elasticsearch/_sync/client/enrich.py +26 -5
  54. elasticsearch/_sync/client/eql.py +32 -4
  55. elasticsearch/_sync/client/esql.py +62 -6
  56. elasticsearch/_sync/client/features.py +12 -2
  57. elasticsearch/_sync/client/fleet.py +8 -2
  58. elasticsearch/_sync/client/graph.py +1 -1
  59. elasticsearch/_sync/client/ilm.py +23 -22
  60. elasticsearch/_sync/client/indices.py +424 -132
  61. elasticsearch/_sync/client/inference.py +1853 -115
  62. elasticsearch/_sync/client/ingest.py +32 -38
  63. elasticsearch/_sync/client/license.py +51 -16
  64. elasticsearch/_sync/client/logstash.py +3 -3
  65. elasticsearch/_sync/client/migration.py +3 -3
  66. elasticsearch/_sync/client/ml.py +141 -112
  67. elasticsearch/_sync/client/monitoring.py +1 -1
  68. elasticsearch/_sync/client/nodes.py +9 -27
  69. elasticsearch/_sync/client/query_rules.py +8 -8
  70. elasticsearch/_sync/client/rollup.py +8 -8
  71. elasticsearch/_sync/client/search_application.py +13 -13
  72. elasticsearch/_sync/client/searchable_snapshots.py +4 -4
  73. elasticsearch/_sync/client/security.py +71 -71
  74. elasticsearch/_sync/client/shutdown.py +3 -10
  75. elasticsearch/_sync/client/simulate.py +6 -6
  76. elasticsearch/_sync/client/slm.py +9 -9
  77. elasticsearch/_sync/client/snapshot.py +13 -17
  78. elasticsearch/_sync/client/sql.py +6 -6
  79. elasticsearch/_sync/client/ssl.py +1 -1
  80. elasticsearch/_sync/client/synonyms.py +7 -7
  81. elasticsearch/_sync/client/tasks.py +3 -9
  82. elasticsearch/_sync/client/text_structure.py +4 -4
  83. elasticsearch/_sync/client/transform.py +30 -28
  84. elasticsearch/_sync/client/utils.py +0 -3
  85. elasticsearch/_sync/client/watcher.py +22 -14
  86. elasticsearch/_sync/client/xpack.py +2 -2
  87. elasticsearch/_version.py +1 -1
  88. elasticsearch/dsl/__init__.py +203 -0
  89. elasticsearch/dsl/_async/__init__.py +16 -0
  90. elasticsearch/dsl/_async/document.py +522 -0
  91. elasticsearch/dsl/_async/faceted_search.py +50 -0
  92. elasticsearch/dsl/_async/index.py +639 -0
  93. elasticsearch/dsl/_async/mapping.py +49 -0
  94. elasticsearch/dsl/_async/search.py +233 -0
  95. elasticsearch/dsl/_async/update_by_query.py +47 -0
  96. elasticsearch/dsl/_sync/__init__.py +16 -0
  97. elasticsearch/dsl/_sync/document.py +514 -0
  98. elasticsearch/dsl/_sync/faceted_search.py +50 -0
  99. elasticsearch/dsl/_sync/index.py +597 -0
  100. elasticsearch/dsl/_sync/mapping.py +49 -0
  101. elasticsearch/dsl/_sync/search.py +226 -0
  102. elasticsearch/dsl/_sync/update_by_query.py +45 -0
  103. elasticsearch/dsl/aggs.py +3730 -0
  104. elasticsearch/dsl/analysis.py +341 -0
  105. elasticsearch/dsl/async_connections.py +37 -0
  106. elasticsearch/dsl/connections.py +142 -0
  107. elasticsearch/dsl/document.py +20 -0
  108. elasticsearch/dsl/document_base.py +444 -0
  109. elasticsearch/dsl/exceptions.py +32 -0
  110. elasticsearch/dsl/faceted_search.py +28 -0
  111. elasticsearch/dsl/faceted_search_base.py +489 -0
  112. elasticsearch/dsl/field.py +4254 -0
  113. elasticsearch/dsl/function.py +180 -0
  114. elasticsearch/dsl/index.py +23 -0
  115. elasticsearch/dsl/index_base.py +178 -0
  116. elasticsearch/dsl/mapping.py +19 -0
  117. elasticsearch/dsl/mapping_base.py +219 -0
  118. elasticsearch/dsl/query.py +2816 -0
  119. elasticsearch/dsl/response/__init__.py +388 -0
  120. elasticsearch/dsl/response/aggs.py +100 -0
  121. elasticsearch/dsl/response/hit.py +53 -0
  122. elasticsearch/dsl/search.py +20 -0
  123. elasticsearch/dsl/search_base.py +1040 -0
  124. elasticsearch/dsl/serializer.py +34 -0
  125. elasticsearch/dsl/types.py +6471 -0
  126. elasticsearch/dsl/update_by_query.py +19 -0
  127. elasticsearch/dsl/update_by_query_base.py +149 -0
  128. elasticsearch/dsl/utils.py +687 -0
  129. elasticsearch/dsl/wrappers.py +119 -0
  130. {elasticsearch-8.17.2.dist-info → elasticsearch-8.18.0.dist-info}/METADATA +12 -2
  131. elasticsearch-8.18.0.dist-info/RECORD +161 -0
  132. elasticsearch-8.17.2.dist-info/RECORD +0 -119
  133. {elasticsearch-8.17.2.dist-info → elasticsearch-8.18.0.dist-info}/WHEEL +0 -0
  134. {elasticsearch-8.17.2.dist-info → elasticsearch-8.18.0.dist-info}/licenses/LICENSE +0 -0
  135. {elasticsearch-8.17.2.dist-info → elasticsearch-8.18.0.dist-info}/licenses/NOTICE +0 -0
@@ -25,6 +25,74 @@ from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters
 
 class InferenceClient(NamespacedClient):
 
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    async def completion(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Perform completion inference on the service</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html>`_
+
+        :param inference_id: The inference Id
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/completion/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.completion",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     async def delete(
         self,
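The new completion helper POSTs to /_inference/completion/{inference_id}, as the hunk above shows. A minimal usage sketch, assuming a local cluster and an endpoint named my-completion-endpoint (both placeholders, not part of this diff):

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def main() -> None:
        # Placeholder connection details; substitute your own cluster and auth.
        client = AsyncElasticsearch("http://localhost:9200")
        # "my-completion-endpoint" is a hypothetical endpoint created earlier
        # with the put API; input accepts a string or a list of strings.
        resp = await client.inference.completion(
            inference_id="my-completion-endpoint",
            input="Explain inverted indexes in one sentence.",
            timeout="30s",
        )
        print(resp)
        await client.close()


    asyncio.run(main())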
@@ -33,7 +101,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         dry_run: t.Optional[bool] = None,
@@ -49,14 +123,14 @@ class InferenceClient(NamespacedClient):
             <p>Delete an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-inference-api.html>`_
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/delete-inference-api.html>`_
 
-        :param inference_id: The inference Id
+        :param inference_id: The inference identifier.
         :param task_type: The task type
-        :param dry_run: When true, the endpoint is not deleted, and a list of ingest
-            processors which reference this endpoint is returned
+        :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
+            which reference this endpoint is returned.
         :param force: When true, the inference endpoint is forcefully deleted even if
-            it is still being used by ingest processors or semantic text fields
+            it is still being used by ingest processors or semantic text fields.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -102,7 +176,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         inference_id: t.Optional[str] = None,
@@ -117,7 +197,7 @@ class InferenceClient(NamespacedClient):
             <p>Get an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-inference-api.html>`_
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/get-inference-api.html>`_
 
         :param task_type: The task type
         :param inference_id: The inference Id
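Both delete and get (the hunks above) gain chat_completion in their task_type literals. A brief sketch against a hypothetical endpoint, reusing a client set up as in the earlier example:

    from elasticsearch import AsyncElasticsearch


    async def inspect_then_remove(client: AsyncElasticsearch) -> None:
        # "my-chat-endpoint" is a hypothetical endpoint ID.
        await client.inference.get(
            task_type="chat_completion", inference_id="my-chat-endpoint"
        )
        # dry_run=True only reports the ingest processors that still
        # reference the endpoint; nothing is deleted.
        await client.inference.delete(
            task_type="chat_completion",
            inference_id="my-chat-endpoint",
            dry_run=True,
        )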
@@ -155,48 +235,59 @@ class InferenceClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("input", "query", "task_settings"),
+        body_name="inference_config",
     )
-    async def inference(
+    async def put(
         self,
         *,
         inference_id: str,
-        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        inference_config: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        query: t.Optional[str] = None,
-        task_settings: t.Optional[t.Any] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-            <p>Perform inference on the service</p>
+            <p>Create an inference endpoint.
+            When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+            <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+            For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+            However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-inference-api.html>`_
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html>`_
 
         :param inference_id: The inference Id
-        :param input: Inference input. Either a string or an array of strings.
+        :param inference_config:
         :param task_type: The task type
-        :param query: Query input, required for rerank task. Not required for other tasks.
-        :param task_settings: Optional task settings
-        :param timeout: Specifies the amount of time to wait for the inference request
-            to complete.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
-        if input is None and body is None:
-            raise ValueError("Empty value passed for parameter 'input'")
+        if inference_config is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'inference_config' and 'body', one of them should be set."
+            )
+        elif inference_config is not None and body is not None:
+            raise ValueError("Cannot set both 'inference_config' and 'body'")
         __path_parts: t.Dict[str, str]
         if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
             __path_parts = {
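With the generic inference method removed, put now carries the whole request body in inference_config (exactly one of inference_config or body must be set). A sketch, using a placeholder OpenAI configuration:

    from elasticsearch import AsyncElasticsearch


    async def create_endpoint(client: AsyncElasticsearch) -> None:
        # Placeholder service and credentials; the service_settings keys are
        # specific to whichever service the config names.
        await client.inference.put(
            task_type="text_embedding",
            inference_id="my-embedding-endpoint",
            inference_config={
                "service": "openai",
                "service_settings": {
                    "api_key": "<API_KEY>",
                    "model_id": "text-embedding-3-small",
                },
            },
        )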
@@ -210,6 +301,91 @@ class InferenceClient(NamespacedClient):
         else:
             raise ValueError("Couldn't find a path for the given parameters")
         __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __body = inference_config if inference_config is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_alibabacloud(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"]
+        ],
+        alibabacloud_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create an AlibabaCloud AI Search inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>alibabacloud-ai-search</code> service.</p>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-alibabacloud-ai-search.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param alibabacloud_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `alibabacloud-ai-search`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `alibabacloud-ai-search` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if alibabacloud_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'alibabacloud_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "alibabacloud_inference_id": _quote(alibabacloud_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
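put_alibabacloud is the first of the new service-specific helpers: service, service_settings, chunking_settings, and task_settings become top-level arguments instead of a single config mapping. A sketch; the service_settings keys shown are assumptions taken from the service documentation, not from this diff:

    from elasticsearch import AsyncElasticsearch


    async def create_alibabacloud(client: AsyncElasticsearch) -> None:
        # Placeholder credentials, host, and workspace.
        await client.inference.put_alibabacloud(
            task_type="text_embedding",
            alibabacloud_inference_id="my-alibabacloud-endpoint",
            service="alibabacloud-ai-search",
            service_settings={
                "api_key": "<API_KEY>",
                "service_id": "ops-text-embedding-001",
                "host": "<HOST>.platform-cn-shanghai.opensearch.aliyuncs.com",
                "workspace": "default",
            },
        )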
@@ -219,13 +395,13 @@ class InferenceClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         if not __body:
-            if input is not None:
-                __body["input"] = input
-            if query is not None:
-                __body["query"] = query
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
             if task_settings is not None:
                 __body["task_settings"] = task_settings
             if not __body:
@@ -234,76 +410,83 @@ class InferenceClient(NamespacedClient):
         if __body is not None:
             __headers["content-type"] = "application/json"
         return await self.perform_request(  # type: ignore[return-value]
-            "POST",
+            "PUT",
             __path,
             params=__query,
             headers=__headers,
             body=__body,
-            endpoint_id="inference.inference",
+            endpoint_id="inference.put_alibabacloud",
             path_parts=__path_parts,
         )
 
     @_rewrite_parameters(
-        body_name="inference_config",
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
     )
-    async def put(
+    async def put_amazonbedrock(
         self,
         *,
-        inference_id: str,
-        inference_config: t.Optional[t.Mapping[str, t.Any]] = None,
-        body: t.Optional[t.Mapping[str, t.Any]] = None,
-        task_type: t.Optional[
-            t.Union[
-                str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
-            ]
-        ] = None,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        amazonbedrock_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-            <p>Create an inference endpoint.
-            When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            <p>Create an Amazon Bedrock inference endpoint.</p>
+            <p>Creates an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
+            <blockquote>
+            <p>info
+            You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.</p>
+            </blockquote>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
             After creating the endpoint, wait for the model deployment to complete before using it.
             To verify the deployment status, use the get trained model statistics API.
             Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
             Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
-            <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
-            For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
-            However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-inference-api.html>`_
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-amazon-bedrock.html>`_
 
-        :param inference_id: The inference Id
-        :param inference_config:
-        :param task_type: The task type
+        :param task_type: The type of the inference task that the model will perform.
+        :param amazonbedrock_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `amazonbedrock`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `amazonbedrock` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
         """
-        if inference_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'inference_id'")
-        if inference_config is None and body is None:
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if amazonbedrock_inference_id in SKIP_IN_PATH:
             raise ValueError(
-                "Empty value passed for parameters 'inference_config' and 'body', one of them should be set."
+                "Empty value passed for parameter 'amazonbedrock_inference_id'"
             )
-        elif inference_config is not None and body is not None:
-            raise ValueError("Cannot set both 'inference_config' and 'body'")
-        __path_parts: t.Dict[str, str]
-        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
-            __path_parts = {
-                "task_type": _quote(task_type),
-                "inference_id": _quote(inference_id),
-            }
-            __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}'
-        elif inference_id not in SKIP_IN_PATH:
-            __path_parts = {"inference_id": _quote(inference_id)}
-            __path = f'/_inference/{__path_parts["inference_id"]}'
-        else:
-            raise ValueError("Couldn't find a path for the given parameters")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}'
         __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
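Per the info note in the docstring above, the Bedrock access and secret keys are supplied once at creation and can never be read back or rotated; changing them means deleting and recreating the endpoint. A sketch with placeholder credentials (the service_settings keys are assumptions from the service documentation):

    from elasticsearch import AsyncElasticsearch


    async def create_bedrock(client: AsyncElasticsearch) -> None:
        # Placeholder AWS credentials; they are stored once at creation time
        # and are never returned by the get inference API.
        await client.inference.put_amazonbedrock(
            task_type="completion",
            amazonbedrock_inference_id="my-bedrock-endpoint",
            service="amazonbedrock",
            service_settings={
                "access_key": "<AWS_ACCESS_KEY>",
                "secret_key": "<AWS_SECRET_KEY>",
                "region": "us-east-1",
                "provider": "amazontitan",
                "model": "amazon.titan-text-express-v1",
            },
        )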
@@ -312,75 +495,95 @@ class InferenceClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
-        __body = inference_config if inference_config is not None else body
-        __headers = {"accept": "application/json", "content-type": "application/json"}
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
             params=__query,
             headers=__headers,
             body=__body,
-            endpoint_id="inference.put",
+            endpoint_id="inference.put_amazonbedrock",
             path_parts=__path_parts,
         )
 
     @_rewrite_parameters(
-        body_name="inference_config",
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
     )
-    async def update(
+    async def put_anthropic(
         self,
         *,
-        inference_id: str,
-        inference_config: t.Optional[t.Mapping[str, t.Any]] = None,
-        body: t.Optional[t.Mapping[str, t.Any]] = None,
-        task_type: t.Optional[
-            t.Union[
-                str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
-            ]
-        ] = None,
+        task_type: t.Union[str, t.Literal["completion"]],
+        anthropic_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-            <p>Update an inference endpoint.</p>
-            <p>Modify <code>task_settings</code>, secrets (within <code>service_settings</code>), or <code>num_allocations</code> for an inference endpoint, depending on the specific endpoint service and <code>task_type</code>.</p>
-            <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
-            For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
-            However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+            <p>Create an Anthropic inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>anthropic</code> service.</p>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-inference-api.html>`_
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-anthropic.html>`_
 
-        :param inference_id: The unique identifier of the inference endpoint.
-        :param inference_config:
-        :param task_type: The type of inference task that the model performs.
+        :param task_type: The task type. The only valid task type for the model to perform
+            is `completion`.
+        :param anthropic_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `anthropic`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `watsonxai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
         """
-        if inference_id in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'inference_id'")
-        if inference_config is None and body is None:
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if anthropic_inference_id in SKIP_IN_PATH:
             raise ValueError(
-                "Empty value passed for parameters 'inference_config' and 'body', one of them should be set."
+                "Empty value passed for parameter 'anthropic_inference_id'"
             )
-        elif inference_config is not None and body is not None:
-            raise ValueError("Cannot set both 'inference_config' and 'body'")
-        __path_parts: t.Dict[str, str]
-        if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
-            __path_parts = {
-                "task_type": _quote(task_type),
-                "inference_id": _quote(inference_id),
-            }
-            __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update'
-        elif inference_id not in SKIP_IN_PATH:
-            __path_parts = {"inference_id": _quote(inference_id)}
-            __path = f'/_inference/{__path_parts["inference_id"]}/_update'
-        else:
-            raise ValueError("Couldn't find a path for the given parameters")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "anthropic_inference_id": _quote(anthropic_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}'
         __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -389,10 +592,1545 @@ class InferenceClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
-        __body = inference_config if inference_config is not None else body
-        __headers = {"accept": "application/json", "content-type": "application/json"}
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return await self.perform_request(  # type: ignore[return-value]
-            "POST",
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_anthropic",
+            path_parts=__path_parts,
+        )
+
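put_anthropic accepts only the completion task type. A sketch; the model name and the max_tokens task setting are assumptions from the service documentation, not from this diff:

    from elasticsearch import AsyncElasticsearch


    async def create_anthropic(client: AsyncElasticsearch) -> None:
        # Placeholder key and model; "completion" is the only valid task type.
        await client.inference.put_anthropic(
            task_type="completion",
            anthropic_inference_id="my-anthropic-endpoint",
            service="anthropic",
            service_settings={
                "api_key": "<API_KEY>",
                "model_id": "claude-3-5-haiku-latest",
            },
            task_settings={"max_tokens": 1024},
        )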
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_azureaistudio(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        azureaistudio_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create an Azure AI studio inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>azureaistudio</code> service.</p>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-ai-studio.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param azureaistudio_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureaistudio`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `openai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureaistudio_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureaistudio_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureaistudio_inference_id": _quote(azureaistudio_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureaistudio",
+            path_parts=__path_parts,
+        )
+
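A sketch for put_azureaistudio; the target, provider, and endpoint_type keys are assumptions from the service documentation, and the deployment URL is a placeholder:

    from elasticsearch import AsyncElasticsearch


    async def create_azure_ai_studio(client: AsyncElasticsearch) -> None:
        # Placeholder deployment target and key.
        await client.inference.put_azureaistudio(
            task_type="completion",
            azureaistudio_inference_id="my-azureaistudio-endpoint",
            service="azureaistudio",
            service_settings={
                "api_key": "<API_KEY>",
                "target": "https://<DEPLOYMENT>.eastus2.inference.ai.azure.com",
                "provider": "microsoft_phi",
                "endpoint_type": "realtime",
            },
        )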
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_azureopenai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        azureopenai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create an Azure OpenAI inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>azureopenai</code> service.</p>
+            <p>The list of chat completion models that you can choose from in your Azure OpenAI deployment include:</p>
+            <ul>
+            <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models">GPT-4 and GPT-4 Turbo models</a></li>
+            <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35">GPT-3.5</a></li>
+            </ul>
+            <p>The list of embeddings models that you can choose from in your deployment can be found in the <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings">Azure models documentation</a>.</p>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-azure-openai.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+            NOTE: The `chat_completion` task type only supports streaming and only through
+            the _stream API.
+        :param azureopenai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureopenai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `azureopenai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureopenai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureopenai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureopenai_inference_id": _quote(azureopenai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureopenai",
+            path_parts=__path_parts,
+        )
+
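A sketch for put_azureopenai; resource_name, deployment_id, and api_version are assumptions from the service documentation, with placeholder values:

    from elasticsearch import AsyncElasticsearch


    async def create_azure_openai(client: AsyncElasticsearch) -> None:
        # Placeholder resource and deployment identifiers.
        await client.inference.put_azureopenai(
            task_type="text_embedding",
            azureopenai_inference_id="my-azureopenai-endpoint",
            service="azureopenai",
            service_settings={
                "api_key": "<API_KEY>",
                "resource_name": "<RESOURCE_NAME>",
                "deployment_id": "<DEPLOYMENT_ID>",
                "api_version": "2024-02-01",
            },
        )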
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_cohere(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]],
+        cohere_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create a Cohere inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>cohere</code> service.</p>
+            <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+            After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-cohere.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param cohere_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `cohere`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `cohere` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if cohere_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'cohere_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "cohere_inference_id": _quote(cohere_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_cohere",
+            path_parts=__path_parts,
+        )
+
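A sketch for put_cohere creating a rerank endpoint; the model name and the top_n task setting are assumptions from the service documentation:

    from elasticsearch import AsyncElasticsearch


    async def create_cohere(client: AsyncElasticsearch) -> None:
        # Placeholder key; top_n bounds how many hits are re-scored.
        await client.inference.put_cohere(
            task_type="rerank",
            cohere_inference_id="my-cohere-endpoint",
            service="cohere",
            service_settings={
                "api_key": "<API_KEY>",
                "model_id": "rerank-english-v3.0",
            },
            task_settings={"top_n": 10},
        )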
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_elasticsearch(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["rerank", "sparse_embedding", "text_embedding"]
+        ],
+        elasticsearch_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create an Elasticsearch inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>elasticsearch</code> service.</p>
+            <blockquote>
+            <p>info
+            Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.</p>
+            </blockquote>
+            <p>If you use the ELSER or the E5 model through the <code>elasticsearch</code> service, the API request will automatically download and deploy the model if it isn't downloaded yet.</p>
+            <blockquote>
+            <p>info
+            You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+            </blockquote>
+            <p>After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elasticsearch.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elasticsearch_inference_id: The unique identifier of the inference endpoint.
+            The must not match the `model_id`.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elasticsearch`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elasticsearch` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elasticsearch_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'elasticsearch_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elasticsearch_inference_id": _quote(elasticsearch_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        if not __body:
+            __body = None  # type: ignore[assignment]
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_elasticsearch",
+            path_parts=__path_parts,
+        )
+
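A sketch for put_elasticsearch deploying the built-in E5 model; the model ID and allocation settings are assumptions from the service documentation. Because the model may still be downloading (see the 502 note in the docstring above), a generous client-side request timeout helps:

    from elasticsearch import AsyncElasticsearch


    async def create_e5(client: AsyncElasticsearch) -> None:
        # The endpoint ID must differ from the model_id; a long request
        # timeout leaves room for the background model download.
        await client.options(request_timeout=600).inference.put_elasticsearch(
            task_type="text_embedding",
            elasticsearch_inference_id="my-e5-endpoint",
            service="elasticsearch",
            service_settings={
                "model_id": ".multilingual-e5-small",
                "num_allocations": 1,
                "num_threads": 1,
            },
        )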
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_elser(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["sparse_embedding"]],
+        elser_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elser"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+            <p>Create an ELSER inference endpoint.</p>
+            <p>Create an inference endpoint to perform an inference task with the <code>elser</code> service.
+            You can also deploy ELSER by using the Elasticsearch inference integration.</p>
+            <blockquote>
+            <p>info
+            Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings.</p>
+            </blockquote>
+            <p>The API request will automatically download and deploy the ELSER model if it isn't already downloaded.</p>
+            <blockquote>
+            <p>info
+            You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+            </blockquote>
+            <p>After creating the endpoint, wait for the model deployment to complete before using it.
+            To verify the deployment status, use the get trained model statistics API.
+            Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+            Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elser.html>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elser_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elser`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elser` service.
+        :param chunking_settings: The chunking configuration object.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elser_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'elser_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elser_inference_id": _quote(elser_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
1095
+ __body["service_settings"] = service_settings
1096
+ if chunking_settings is not None:
1097
+ __body["chunking_settings"] = chunking_settings
1098
+ if not __body:
1099
+ __body = None # type: ignore[assignment]
1100
+ __headers = {"accept": "application/json"}
1101
+ if __body is not None:
1102
+ __headers["content-type"] = "application/json"
1103
+ return await self.perform_request( # type: ignore[return-value]
1104
+ "PUT",
1105
+ __path,
1106
+ params=__query,
1107
+ headers=__headers,
1108
+ body=__body,
1109
+ endpoint_id="inference.put_elser",
1110
+ path_parts=__path_parts,
1111
+ )
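
A similar sketch for `put_elser`, reusing the `client` from the previous example inside an async context; the endpoint ID and allocation counts are placeholders:

```python
# Reuses `client` (AsyncElasticsearch) from the sketch above; run inside
# an async function. ELSER only supports the sparse_embedding task type.
resp = await client.inference.put_elser(
    task_type="sparse_embedding",
    elser_inference_id="my-elser-endpoint",  # hypothetical ID
    service="elser",
    service_settings={"num_allocations": 1, "num_threads": 1},
)
```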
+
+     @_rewrite_parameters(
+         body_fields=("service", "service_settings", "chunking_settings"),
+     )
+     async def put_googleaistudio(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+         googleaistudio_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a Google AI Studio inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>googleaistudio</code> service.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-ai-studio.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+         :param googleaistudio_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `googleaistudio`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `googleaistudio` service.
+         :param chunking_settings: The chunking configuration object.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if googleaistudio_inference_id in SKIP_IN_PATH:
+             raise ValueError(
+                 "Empty value passed for parameter 'googleaistudio_inference_id'"
+             )
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "googleaistudio_inference_id": _quote(googleaistudio_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_googleaistudio",
+             path_parts=__path_parts,
+         )
+
+     @_rewrite_parameters(
+         body_fields=(
+             "service",
+             "service_settings",
+             "chunking_settings",
+             "task_settings",
+         ),
+     )
+     async def put_googlevertexai(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+         googlevertexai_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a Google Vertex AI inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>googlevertexai</code> service.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-google-vertex-ai.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+         :param googlevertexai_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `googlevertexai`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `googlevertexai` service.
+         :param chunking_settings: The chunking configuration object.
+         :param task_settings: Settings to configure the inference task. These settings
+             are specific to the task type you specified.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if googlevertexai_inference_id in SKIP_IN_PATH:
+             raise ValueError(
+                 "Empty value passed for parameter 'googlevertexai_inference_id'"
+             )
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "googlevertexai_inference_id": _quote(googlevertexai_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_googlevertexai",
+             path_parts=__path_parts,
+         )
+
+     @_rewrite_parameters(
+         body_fields=("service", "service_settings", "chunking_settings"),
+     )
+     async def put_hugging_face(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["text_embedding"]],
+         huggingface_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a Hugging Face inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>hugging_face</code> service.</p>
+           <p>You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.
+           Select the model you want to use on the new endpoint creation page (for example <code>intfloat/e5-small-v2</code>), then select the sentence embeddings task under the advanced configuration section.
+           Create the endpoint and copy the URL after the endpoint initialization has finished.</p>
+           <p>The following models are recommended for the Hugging Face service:</p>
+           <ul>
+           <li><code>all-MiniLM-L6-v2</code></li>
+           <li><code>all-MiniLM-L12-v2</code></li>
+           <li><code>all-mpnet-base-v2</code></li>
+           <li><code>e5-base-v2</code></li>
+           <li><code>e5-small-v2</code></li>
+           <li><code>multilingual-e5-base</code></li>
+           <li><code>multilingual-e5-small</code></li>
+           </ul>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-hugging-face.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+         :param huggingface_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `hugging_face`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `hugging_face` service.
+         :param chunking_settings: The chunking configuration object.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if huggingface_inference_id in SKIP_IN_PATH:
+             raise ValueError(
+                 "Empty value passed for parameter 'huggingface_inference_id'"
+             )
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "huggingface_inference_id": _quote(huggingface_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_hugging_face",
+             path_parts=__path_parts,
+         )
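
A hedged sketch for `put_hugging_face`; the `url` and `api_key` keys follow the Hugging Face service settings described in the linked reference, and both values are placeholders you would replace with your own endpoint URL and access token:

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.put_hugging_face(
    task_type="text_embedding",
    huggingface_inference_id="my-hf-embeddings",  # hypothetical ID
    service="hugging_face",
    service_settings={
        "api_key": "<hugging-face-access-token>",  # placeholder
        "url": "<hugging-face-endpoint-url>",  # placeholder
    },
)
```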
+
+     @_rewrite_parameters(
+         body_fields=(
+             "service",
+             "service_settings",
+             "chunking_settings",
+             "task_settings",
+         ),
+     )
+     async def put_jinaai(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+         jinaai_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a JinaAI inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>jinaai</code> service.</p>
+           <p>To review the available <code>rerank</code> models, refer to <a href="https://jina.ai/reranker">https://jina.ai/reranker</a>.
+           To review the available <code>text_embedding</code> models, refer to <a href="https://jina.ai/embeddings/">https://jina.ai/embeddings/</a>.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-jinaai.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+         :param jinaai_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `jinaai`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `jinaai` service.
+         :param chunking_settings: The chunking configuration object.
+         :param task_settings: Settings to configure the inference task. These settings
+             are specific to the task type you specified.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if jinaai_inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'jinaai_inference_id'")
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "jinaai_inference_id": _quote(jinaai_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_jinaai",
+             path_parts=__path_parts,
+         )
+
+     @_rewrite_parameters(
+         body_fields=("service", "service_settings", "chunking_settings"),
+     )
+     async def put_mistral(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["text_embedding"]],
+         mistral_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a Mistral inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>mistral</code> service.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-mistral.html>`_
+
+         :param task_type: The task type. The only valid task type for the model to perform
+             is `text_embedding`.
+         :param mistral_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `mistral`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `mistral` service.
+         :param chunking_settings: The chunking configuration object.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if mistral_inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'mistral_inference_id'")
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "mistral_inference_id": _quote(mistral_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_mistral",
+             path_parts=__path_parts,
+         )
+
+     @_rewrite_parameters(
+         body_fields=(
+             "service",
+             "service_settings",
+             "chunking_settings",
+             "task_settings",
+         ),
+     )
+     async def put_openai(
+         self,
+         *,
+         task_type: t.Union[
+             str, t.Literal["chat_completion", "completion", "text_embedding"]
+         ],
+         openai_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["openai"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create an OpenAI inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>openai</code> service or <code>openai</code> compatible APIs.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-openai.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+             NOTE: The `chat_completion` task type only supports streaming and only through
+             the _stream API.
+         :param openai_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `openai`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `openai` service.
+         :param chunking_settings: The chunking configuration object.
+         :param task_settings: Settings to configure the inference task. These settings
+             are specific to the task type you specified.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if openai_inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'openai_inference_id'")
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "openai_inference_id": _quote(openai_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_openai",
+             path_parts=__path_parts,
+         )
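
For `put_openai`, a minimal sketch under the assumption that `service_settings` takes an `api_key` and a `model_id` as described in the linked reference; the values below are placeholders:

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="my-openai-embeddings",  # hypothetical ID
    service="openai",
    service_settings={
        "api_key": "<openai-api-key>",  # placeholder
        "model_id": "text-embedding-3-small",  # assumed OpenAI model name
    },
)
```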
+
+     @_rewrite_parameters(
+         body_fields=(
+             "service",
+             "service_settings",
+             "chunking_settings",
+             "task_settings",
+         ),
+     )
+     async def put_voyageai(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+         voyageai_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a VoyageAI inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>voyageai</code> service.</p>
+           <p>Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-voyageai.html>`_
+
+         :param task_type: The type of the inference task that the model will perform.
+         :param voyageai_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `voyageai`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `voyageai` service.
+         :param chunking_settings: The chunking configuration object.
+         :param task_settings: Settings to configure the inference task. These settings
+             are specific to the task type you specified.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if voyageai_inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'voyageai_inference_id'")
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "voyageai_inference_id": _quote(voyageai_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+             if chunking_settings is not None:
+                 __body["chunking_settings"] = chunking_settings
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_voyageai",
+             path_parts=__path_parts,
+         )
+
+     @_rewrite_parameters(
+         body_fields=("service", "service_settings"),
+     )
+     async def put_watsonx(
+         self,
+         *,
+         task_type: t.Union[str, t.Literal["text_embedding"]],
+         watsonx_inference_id: str,
+         service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
+         service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Create a Watsonx inference endpoint.</p>
+           <p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
+           You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
+           You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
+           <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+           After creating the endpoint, wait for the model deployment to complete before using it.
+           To verify the deployment status, use the get trained model statistics API.
+           Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+           Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-watsonx-ai.html>`_
+
+         :param task_type: The task type. The only valid task type for the model to perform
+             is `text_embedding`.
+         :param watsonx_inference_id: The unique identifier of the inference endpoint.
+         :param service: The type of service supported for the specified task type. In
+             this case, `watsonxai`.
+         :param service_settings: Settings used to install the inference model. These
+             settings are specific to the `watsonxai` service.
+         """
+         if task_type in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'task_type'")
+         if watsonx_inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'watsonx_inference_id'")
+         if service is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service'")
+         if service_settings is None and body is None:
+             raise ValueError("Empty value passed for parameter 'service_settings'")
+         __path_parts: t.Dict[str, str] = {
+             "task_type": _quote(task_type),
+             "watsonx_inference_id": _quote(watsonx_inference_id),
+         }
+         __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if not __body:
+             if service is not None:
+                 __body["service"] = service
+             if service_settings is not None:
+                 __body["service_settings"] = service_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.put_watsonx",
+             path_parts=__path_parts,
+         )
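
A sketch for `put_watsonx`; the service-settings keys shown here are assumptions based on the Watsonx service settings in the linked reference, and every value is a placeholder:

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="my-watsonx-embeddings",  # hypothetical ID
    service="watsonxai",
    service_settings={
        "api_key": "<ibm-cloud-api-key>",  # placeholder
        "url": "<watsonx-deployment-url>",  # placeholder
        "model_id": "<watsonx-model-id>",  # placeholder
        "project_id": "<watsonx-project-id>",  # placeholder
        "api_version": "<api-version-date>",  # placeholder
    },
)
```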
+
+     @_rewrite_parameters(
+         body_fields=("input", "query", "task_settings"),
+     )
+     async def rerank(
+         self,
+         *,
+         inference_id: str,
+         input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         query: t.Optional[str] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Any] = None,
+         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Perform reranking inference on the service</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html>`_
+
+         :param inference_id: The unique identifier for the inference endpoint.
+         :param input: The text on which you want to perform the inference task. It can
+             be a single string or an array. NOTE: Inference endpoints for the `completion`
+             task type currently only support a single string as input.
+         :param query: Query input.
+         :param task_settings: Task settings for the individual inference request. These
+             settings are specific to the task type you specified and override the task
+             settings specified when initializing the service.
+         :param timeout: The amount of time to wait for the inference request to complete.
+         """
+         if inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'inference_id'")
+         if input is None and body is None:
+             raise ValueError("Empty value passed for parameter 'input'")
+         if query is None and body is None:
+             raise ValueError("Empty value passed for parameter 'query'")
+         __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+         __path = f'/_inference/rerank/{__path_parts["inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if timeout is not None:
+             __query["timeout"] = timeout
+         if not __body:
+             if input is not None:
+                 __body["input"] = input
+             if query is not None:
+                 __body["query"] = query
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "POST",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.rerank",
+             path_parts=__path_parts,
+         )
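
Once a rerank endpoint exists, calling it uses only parameters visible in the signature above; the endpoint ID and documents here are illustrative:

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.rerank(
    inference_id="my-elastic-rerank",  # hypothetical endpoint from earlier
    query="What is Elasticsearch?",
    input=[
        "Elasticsearch is a distributed search and analytics engine.",
        "The Louvre is a museum in Paris.",
    ],
)
```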
+
+     @_rewrite_parameters(
+         body_fields=("input", "task_settings"),
+     )
+     async def sparse_embedding(
+         self,
+         *,
+         inference_id: str,
+         input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Any] = None,
+         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Perform sparse embedding inference on the service</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html>`_
+
+         :param inference_id: The inference ID
+         :param input: Inference input. Either a string or an array of strings.
+         :param task_settings: Optional task settings
+         :param timeout: Specifies the amount of time to wait for the inference request
+             to complete.
+         """
+         if inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'inference_id'")
+         if input is None and body is None:
+             raise ValueError("Empty value passed for parameter 'input'")
+         __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+         __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if timeout is not None:
+             __query["timeout"] = timeout
+         if not __body:
+             if input is not None:
+                 __body["input"] = input
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "POST",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.sparse_embedding",
+             path_parts=__path_parts,
+         )
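
And for `sparse_embedding`, again using only parameters from the signature above (the endpoint ID refers back to the hypothetical ELSER sketch):

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.sparse_embedding(
    inference_id="my-elser-endpoint",  # hypothetical ELSER endpoint
    input="The quick brown fox jumped over the lazy dog",
)
```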
+
+     @_rewrite_parameters(
+         body_fields=("input", "task_settings"),
+     )
+     async def text_embedding(
+         self,
+         *,
+         inference_id: str,
+         input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+         task_settings: t.Optional[t.Any] = None,
+         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+         body: t.Optional[t.Dict[str, t.Any]] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Perform text embedding inference on the service</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html>`_
+
+         :param inference_id: The inference ID
+         :param input: Inference input. Either a string or an array of strings.
+         :param task_settings: Optional task settings
+         :param timeout: Specifies the amount of time to wait for the inference request
+             to complete.
+         """
+         if inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'inference_id'")
+         if input is None and body is None:
+             raise ValueError("Empty value passed for parameter 'input'")
+         __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+         __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}'
+         __query: t.Dict[str, t.Any] = {}
+         __body: t.Dict[str, t.Any] = body if body is not None else {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         if timeout is not None:
+             __query["timeout"] = timeout
+         if not __body:
+             if input is not None:
+                 __body["input"] = input
+             if task_settings is not None:
+                 __body["task_settings"] = task_settings
+         if not __body:
+             __body = None  # type: ignore[assignment]
+         __headers = {"accept": "application/json"}
+         if __body is not None:
+             __headers["content-type"] = "application/json"
+         return await self.perform_request(  # type: ignore[return-value]
+             "POST",
+             __path,
+             params=__query,
+             headers=__headers,
+             body=__body,
+             endpoint_id="inference.text_embedding",
+             path_parts=__path_parts,
+         )
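
`text_embedding` mirrors `sparse_embedding`; this sketch also shows the optional `timeout` query parameter:

```python
# Run inside an async function with the `client` from the first sketch.
resp = await client.inference.text_embedding(
    inference_id="my-openai-embeddings",  # hypothetical endpoint from earlier
    input=["first passage to embed", "second passage to embed"],
    timeout="30s",
)
```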
+
+     @_rewrite_parameters(
+         body_name="inference_config",
+     )
+     async def update(
+         self,
+         *,
+         inference_id: str,
+         inference_config: t.Optional[t.Mapping[str, t.Any]] = None,
+         body: t.Optional[t.Mapping[str, t.Any]] = None,
+         task_type: t.Optional[
+             t.Union[
+                 str,
+                 t.Literal[
+                     "chat_completion",
+                     "completion",
+                     "rerank",
+                     "sparse_embedding",
+                     "text_embedding",
+                 ],
+             ]
+         ] = None,
+         error_trace: t.Optional[bool] = None,
+         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+         human: t.Optional[bool] = None,
+         pretty: t.Optional[bool] = None,
+     ) -> ObjectApiResponse[t.Any]:
+         """
+         .. raw:: html
+
+           <p>Update an inference endpoint.</p>
+           <p>Modify <code>task_settings</code>, secrets (within <code>service_settings</code>), or <code>num_allocations</code> for an inference endpoint, depending on the specific endpoint service and <code>task_type</code>.</p>
+           <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+           For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+           However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+
+
+         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-inference-api.html>`_
+
+         :param inference_id: The unique identifier of the inference endpoint.
+         :param inference_config:
+         :param task_type: The type of inference task that the model performs.
+         """
+         if inference_id in SKIP_IN_PATH:
+             raise ValueError("Empty value passed for parameter 'inference_id'")
+         if inference_config is None and body is None:
+             raise ValueError(
+                 "Empty value passed for parameters 'inference_config' and 'body', one of them should be set."
+             )
+         elif inference_config is not None and body is not None:
+             raise ValueError("Cannot set both 'inference_config' and 'body'")
+         __path_parts: t.Dict[str, str]
+         if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH:
+             __path_parts = {
+                 "task_type": _quote(task_type),
+                 "inference_id": _quote(inference_id),
+             }
+             __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update'
+         elif inference_id not in SKIP_IN_PATH:
+             __path_parts = {"inference_id": _quote(inference_id)}
+             __path = f'/_inference/{__path_parts["inference_id"]}/_update'
+         else:
+             raise ValueError("Couldn't find a path for the given parameters")
+         __query: t.Dict[str, t.Any] = {}
+         if error_trace is not None:
+             __query["error_trace"] = error_trace
+         if filter_path is not None:
+             __query["filter_path"] = filter_path
+         if human is not None:
+             __query["human"] = human
+         if pretty is not None:
+             __query["pretty"] = pretty
+         __body = inference_config if inference_config is not None else body
+         __headers = {"accept": "application/json", "content-type": "application/json"}
+         return await self.perform_request(  # type: ignore[return-value]
+             "PUT",
              __path,
              params=__query,
              headers=__headers,