@aws-sdk/client-bedrock 3.476.0 → 3.478.0

This diff shows the contents of publicly available package versions as published to a supported public registry, and is provided for informational purposes only.
@@ -1,25 +1,4 @@
+ import { createPaginator } from "@smithy/core";
  import { BedrockClient } from "../BedrockClient";
  import { ListModelCustomizationJobsCommand, } from "../commands/ListModelCustomizationJobsCommand";
- const makePagedClientRequest = async (client, input, ...args) => {
- return await client.send(new ListModelCustomizationJobsCommand(input), ...args);
- };
- export async function* paginateListModelCustomizationJobs(config, input, ...additionalArguments) {
- let token = config.startingToken || undefined;
- let hasNext = true;
- let page;
- while (hasNext) {
- input.nextToken = token;
- input["maxResults"] = config.pageSize;
- if (config.client instanceof BedrockClient) {
- page = await makePagedClientRequest(config.client, input, ...additionalArguments);
- }
- else {
- throw new Error("Invalid client, expected Bedrock | BedrockClient");
- }
- yield page;
- const prevToken = token;
- token = page.nextToken;
- hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken));
- }
- return undefined;
- }
+ export const paginateListModelCustomizationJobs = createPaginator(BedrockClient, ListModelCustomizationJobsCommand, "nextToken", "nextToken", "maxResults");
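The hand-rolled generator above is replaced by a single `createPaginator` factory call, so consumer-facing usage should be unchanged. A minimal sketch of how this paginator is typically consumed (the region and page size are placeholder values; the summaries field follows the Bedrock ListModelCustomizationJobs response shape):

```ts
import { BedrockClient, paginateListModelCustomizationJobs } from "@aws-sdk/client-bedrock";

async function listAllCustomizationJobs(): Promise<void> {
  // Placeholder configuration; any configured BedrockClient works here.
  const client = new BedrockClient({ region: "us-east-1" });

  // Each iteration yields one ListModelCustomizationJobsCommandOutput page;
  // the paginator forwards nextToken between requests and maps pageSize to maxResults.
  for await (const page of paginateListModelCustomizationJobs({ client, pageSize: 10 }, {})) {
    for (const job of page.modelCustomizationJobSummaries ?? []) {
      console.log(job.jobName);
    }
  }
}
```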
@@ -1,25 +1,4 @@
+ import { createPaginator } from "@smithy/core";
  import { BedrockClient } from "../BedrockClient";
  import { ListProvisionedModelThroughputsCommand, } from "../commands/ListProvisionedModelThroughputsCommand";
- const makePagedClientRequest = async (client, input, ...args) => {
- return await client.send(new ListProvisionedModelThroughputsCommand(input), ...args);
- };
- export async function* paginateListProvisionedModelThroughputs(config, input, ...additionalArguments) {
- let token = config.startingToken || undefined;
- let hasNext = true;
- let page;
- while (hasNext) {
- input.nextToken = token;
- input["maxResults"] = config.pageSize;
- if (config.client instanceof BedrockClient) {
- page = await makePagedClientRequest(config.client, input, ...additionalArguments);
- }
- else {
- throw new Error("Invalid client, expected Bedrock | BedrockClient");
- }
- yield page;
- const prevToken = token;
- token = page.nextToken;
- hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken));
- }
- return undefined;
- }
+ export const paginateListProvisionedModelThroughputs = createPaginator(BedrockClient, ListProvisionedModelThroughputsCommand, "nextToken", "nextToken", "maxResults");
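For reference, the five arguments to `createPaginator` correspond directly to what each deleted generator did by hand: the client class to validate `config.client` against, the command to send per page, the input member that carries the continuation token, the output member it is read back from, and the input member populated from `config.pageSize`. A rough behavioural sketch under those assumptions (not the actual @smithy/core implementation):

```ts
// Conceptual sketch only; the real factory lives in @smithy/core.
function sketchCreatePaginator(ClientCtor: any, CommandCtor: any, inputToken: string, outputToken: string, pageSizeKey: string) {
  return async function* (config: any, input: any, ...rest: any[]) {
    let token = config.startingToken || undefined;
    let hasNext = true;
    while (hasNext) {
      input[inputToken] = token;            // e.g. nextToken on the request
      input[pageSizeKey] = config.pageSize; // e.g. maxResults
      if (!(config.client instanceof ClientCtor)) {
        throw new Error("Invalid client, expected BedrockClient");
      }
      const page = await config.client.send(new CommandCtor(input), ...rest);
      yield page;
      const prevToken = token;
      token = page[outputToken];            // e.g. nextToken from the response
      hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken));
    }
  };
}
```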
@@ -1,14 +1,14 @@
- import { HttpRequest as __HttpRequest } from "@smithy/protocol-http";
- import { _json, collectBody, decorateServiceException as __decorateServiceException, expectInt32 as __expectInt32, expectNonNull as __expectNonNull, expectObject as __expectObject, expectString as __expectString, limitedParseFloat32 as __limitedParseFloat32, map, parseRfc3339DateTimeWithOffset as __parseRfc3339DateTimeWithOffset, resolvedPath as __resolvedPath, take, withBaseException, } from "@smithy/smithy-client";
+ import { requestBuilder as rb } from "@smithy/core";
+ import { _json, collectBody, decorateServiceException as __decorateServiceException, expectInt32 as __expectInt32, expectNonNull as __expectNonNull, expectObject as __expectObject, expectString as __expectString, limitedParseFloat32 as __limitedParseFloat32, map, parseRfc3339DateTimeWithOffset as __parseRfc3339DateTimeWithOffset, take, withBaseException, } from "@smithy/smithy-client";
  import { v4 as generateIdempotencyToken } from "uuid";
  import { BedrockServiceException as __BaseException } from "../models/BedrockServiceException";
  import { AccessDeniedException, ConflictException, InternalServerException, ResourceNotFoundException, ServiceQuotaExceededException, ThrottlingException, TooManyTagsException, ValidationException, } from "../models/models_0";
  export const se_CreateModelCustomizationJobCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/model-customization-jobs";
+ b.bp("/model-customization-jobs");
  let body;
  body = JSON.stringify(take(input, {
  baseModelIdentifier: [],
@@ -26,22 +26,15 @@ export const se_CreateModelCustomizationJobCommand = async (input, context) => {
  validationDataConfig: (_) => _json(_),
  vpcConfig: (_) => _json(_),
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_CreateProvisionedModelThroughputCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/provisioned-model-throughput";
+ b.bp("/provisioned-model-throughput");
  let body;
  body = JSON.stringify(take(input, {
  clientRequestToken: [true, (_) => _ ?? generateIdempotencyToken()],
@@ -51,391 +44,250 @@ export const se_CreateProvisionedModelThroughputCommand = async (input, context)
  provisionedModelName: [],
  tags: (_) => _json(_),
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_DeleteCustomModelCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/custom-models/{modelIdentifier}";
- resolvedPath = __resolvedPath(resolvedPath, input, "modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
+ b.bp("/custom-models/{modelIdentifier}");
+ b.p("modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "DELETE",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("DELETE").h(headers).b(body);
+ return b.build();
  };
  export const se_DeleteModelInvocationLoggingConfigurationCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/logging/modelinvocations";
+ b.bp("/logging/modelinvocations");
  let body;
  body = "";
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "DELETE",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("DELETE").h(headers).b(body);
+ return b.build();
  };
  export const se_DeleteProvisionedModelThroughputCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` +
- "/provisioned-model-throughput/{provisionedModelId}";
- resolvedPath = __resolvedPath(resolvedPath, input, "provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
+ b.bp("/provisioned-model-throughput/{provisionedModelId}");
+ b.p("provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "DELETE",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("DELETE").h(headers).b(body);
+ return b.build();
  };
  export const se_GetCustomModelCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/custom-models/{modelIdentifier}";
- resolvedPath = __resolvedPath(resolvedPath, input, "modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
+ b.bp("/custom-models/{modelIdentifier}");
+ b.p("modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("GET").h(headers).b(body);
+ return b.build();
  };
  export const se_GetFoundationModelCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/foundation-models/{modelIdentifier}";
- resolvedPath = __resolvedPath(resolvedPath, input, "modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
+ b.bp("/foundation-models/{modelIdentifier}");
+ b.p("modelIdentifier", () => input.modelIdentifier, "{modelIdentifier}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("GET").h(headers).b(body);
+ return b.build();
  };
  export const se_GetModelCustomizationJobCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/model-customization-jobs/{jobIdentifier}";
- resolvedPath = __resolvedPath(resolvedPath, input, "jobIdentifier", () => input.jobIdentifier, "{jobIdentifier}", false);
+ b.bp("/model-customization-jobs/{jobIdentifier}");
+ b.p("jobIdentifier", () => input.jobIdentifier, "{jobIdentifier}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("GET").h(headers).b(body);
+ return b.build();
  };
  export const se_GetModelInvocationLoggingConfigurationCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/logging/modelinvocations";
+ b.bp("/logging/modelinvocations");
  let body;
  body = "";
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("GET").h(headers).b(body);
+ return b.build();
  };
  export const se_GetProvisionedModelThroughputCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` +
- "/provisioned-model-throughput/{provisionedModelId}";
- resolvedPath = __resolvedPath(resolvedPath, input, "provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
+ b.bp("/provisioned-model-throughput/{provisionedModelId}");
+ b.p("provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("GET").h(headers).b(body);
+ return b.build();
  };
  export const se_ListCustomModelsCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/custom-models";
+ b.bp("/custom-models");
  const query = map({
- creationTimeBefore: [
+ [_cTB]: [
  () => input.creationTimeBefore !== void 0,
- () => (input.creationTimeBefore.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTB].toISOString().split(".")[0] + "Z").toString(),
  ],
- creationTimeAfter: [
+ [_cTA]: [
  () => input.creationTimeAfter !== void 0,
- () => (input.creationTimeAfter.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTA].toISOString().split(".")[0] + "Z").toString(),
  ],
- nameContains: [, input.nameContains],
- baseModelArnEquals: [, input.baseModelArnEquals],
- foundationModelArnEquals: [, input.foundationModelArnEquals],
- maxResults: [() => input.maxResults !== void 0, () => input.maxResults.toString()],
- nextToken: [, input.nextToken],
- sortBy: [, input.sortBy],
- sortOrder: [, input.sortOrder],
+ [_nC]: [, input[_nC]],
+ [_bMAE]: [, input[_bMAE]],
+ [_fMAE]: [, input[_fMAE]],
+ [_mR]: [() => input.maxResults !== void 0, () => input[_mR].toString()],
+ [_nT]: [, input[_nT]],
+ [_sB]: [, input[_sB]],
+ [_sO]: [, input[_sO]],
  });
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- query,
- body,
- });
+ b.m("GET").h(headers).q(query).b(body);
+ return b.build();
  };
  export const se_ListFoundationModelsCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/foundation-models";
+ b.bp("/foundation-models");
  const query = map({
- byProvider: [, input.byProvider],
- byCustomizationType: [, input.byCustomizationType],
- byOutputModality: [, input.byOutputModality],
- byInferenceType: [, input.byInferenceType],
+ [_bP]: [, input[_bP]],
+ [_bCT]: [, input[_bCT]],
+ [_bOM]: [, input[_bOM]],
+ [_bIT]: [, input[_bIT]],
  });
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- query,
- body,
- });
+ b.m("GET").h(headers).q(query).b(body);
+ return b.build();
  };
  export const se_ListModelCustomizationJobsCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/model-customization-jobs";
+ b.bp("/model-customization-jobs");
  const query = map({
- creationTimeAfter: [
+ [_cTA]: [
  () => input.creationTimeAfter !== void 0,
- () => (input.creationTimeAfter.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTA].toISOString().split(".")[0] + "Z").toString(),
  ],
- creationTimeBefore: [
+ [_cTB]: [
  () => input.creationTimeBefore !== void 0,
- () => (input.creationTimeBefore.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTB].toISOString().split(".")[0] + "Z").toString(),
  ],
- statusEquals: [, input.statusEquals],
- nameContains: [, input.nameContains],
- maxResults: [() => input.maxResults !== void 0, () => input.maxResults.toString()],
- nextToken: [, input.nextToken],
- sortBy: [, input.sortBy],
- sortOrder: [, input.sortOrder],
+ [_sE]: [, input[_sE]],
+ [_nC]: [, input[_nC]],
+ [_mR]: [() => input.maxResults !== void 0, () => input[_mR].toString()],
+ [_nT]: [, input[_nT]],
+ [_sB]: [, input[_sB]],
+ [_sO]: [, input[_sO]],
  });
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- query,
- body,
- });
+ b.m("GET").h(headers).q(query).b(body);
+ return b.build();
  };
  export const se_ListProvisionedModelThroughputsCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/provisioned-model-throughputs";
+ b.bp("/provisioned-model-throughputs");
  const query = map({
- creationTimeAfter: [
+ [_cTA]: [
  () => input.creationTimeAfter !== void 0,
- () => (input.creationTimeAfter.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTA].toISOString().split(".")[0] + "Z").toString(),
  ],
- creationTimeBefore: [
+ [_cTB]: [
  () => input.creationTimeBefore !== void 0,
- () => (input.creationTimeBefore.toISOString().split(".")[0] + "Z").toString(),
+ () => (input[_cTB].toISOString().split(".")[0] + "Z").toString(),
  ],
- statusEquals: [, input.statusEquals],
- modelArnEquals: [, input.modelArnEquals],
- nameContains: [, input.nameContains],
- maxResults: [() => input.maxResults !== void 0, () => input.maxResults.toString()],
- nextToken: [, input.nextToken],
- sortBy: [, input.sortBy],
- sortOrder: [, input.sortOrder],
+ [_sE]: [, input[_sE]],
+ [_mAE]: [, input[_mAE]],
+ [_nC]: [, input[_nC]],
+ [_mR]: [() => input.maxResults !== void 0, () => input[_mR].toString()],
+ [_nT]: [, input[_nT]],
+ [_sB]: [, input[_sB]],
+ [_sO]: [, input[_sO]],
  });
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "GET",
- headers,
- path: resolvedPath,
- query,
- body,
- });
+ b.m("GET").h(headers).q(query).b(body);
+ return b.build();
  };
  export const se_ListTagsForResourceCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/listTagsForResource";
+ b.bp("/listTagsForResource");
  let body;
  body = JSON.stringify(take(input, {
  resourceARN: [],
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_PutModelInvocationLoggingConfigurationCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/logging/modelinvocations";
+ b.bp("/logging/modelinvocations");
  let body;
  body = JSON.stringify(take(input, {
  loggingConfig: (_) => _json(_),
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "PUT",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("PUT").h(headers).b(body);
+ return b.build();
  };
  export const se_StopModelCustomizationJobCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {};
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` +
- "/model-customization-jobs/{jobIdentifier}/stop";
- resolvedPath = __resolvedPath(resolvedPath, input, "jobIdentifier", () => input.jobIdentifier, "{jobIdentifier}", false);
+ b.bp("/model-customization-jobs/{jobIdentifier}/stop");
+ b.p("jobIdentifier", () => input.jobIdentifier, "{jobIdentifier}", false);
  let body;
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_TagResourceCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tagResource";
+ b.bp("/tagResource");
  let body;
  body = JSON.stringify(take(input, {
  resourceARN: [],
  tags: (_) => _json(_),
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_UntagResourceCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/untagResource";
+ b.bp("/untagResource");
  let body;
  body = JSON.stringify(take(input, {
  resourceARN: [],
  tagKeys: (_) => _json(_),
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "POST",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("POST").h(headers).b(body);
+ return b.build();
  };
  export const se_UpdateProvisionedModelThroughputCommand = async (input, context) => {
- const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const b = rb(input, context);
  const headers = {
  "content-type": "application/json",
  };
- let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` +
- "/provisioned-model-throughput/{provisionedModelId}";
- resolvedPath = __resolvedPath(resolvedPath, input, "provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
+ b.bp("/provisioned-model-throughput/{provisionedModelId}");
+ b.p("provisionedModelId", () => input.provisionedModelId, "{provisionedModelId}", false);
  let body;
  body = JSON.stringify(take(input, {
  desiredModelId: [],
  desiredProvisionedModelName: [],
  }));
- return new __HttpRequest({
- protocol,
- hostname,
- port,
- method: "PATCH",
- headers,
- path: resolvedPath,
- body,
- });
+ b.m("PATCH").h(headers).b(body);
+ return b.build();
  };
  export const de_CreateModelCustomizationJobCommand = async (output, context) => {
  if (output.statusCode !== 201 && output.statusCode >= 300) {
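Every serializer in the hunk above follows the same mechanical rewrite: the destructured `await context.endpoint()` call, the template-string `resolvedPath` handling, the `__resolvedPath` label substitution, and the final `new __HttpRequest({ ... })` all collapse into a `requestBuilder` chain from `@smithy/core`. The general shape of the rewritten serializers, sketched with a hypothetical `se_ExampleGetCommand` and `/example/{id}` route rather than any real Bedrock operation:

```ts
import { requestBuilder as rb } from "@smithy/core";

// Hypothetical serializer illustrating the pattern used throughout the hunk above.
const se_ExampleGetCommand = async (input: any, context: any) => {
  const b = rb(input, context);              // replaces destructuring await context.endpoint()
  const headers = {};
  b.bp("/example/{id}");                     // base path, previously concatenated onto basePath
  b.p("id", () => input.id, "{id}", false);  // path label substitution, previously __resolvedPath(...)
  let body;
  b.m("GET").h(headers).b(body);             // method, headers and body fields of the request
  return b.build();                          // yields the HttpRequest previously built by hand
};
```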
@@ -1553,6 +1405,21 @@ const isSerializableHeaderValue = (value) => value !== undefined &&
  value !== "" &&
  (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) &&
  (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0);
+ const _bCT = "byCustomizationType";
+ const _bIT = "byInferenceType";
+ const _bMAE = "baseModelArnEquals";
+ const _bOM = "byOutputModality";
+ const _bP = "byProvider";
+ const _cTA = "creationTimeAfter";
+ const _cTB = "creationTimeBefore";
+ const _fMAE = "foundationModelArnEquals";
+ const _mAE = "modelArnEquals";
+ const _mR = "maxResults";
+ const _nC = "nameContains";
+ const _nT = "nextToken";
+ const _sB = "sortBy";
+ const _sE = "statusEquals";
+ const _sO = "sortOrder";
  const parseBody = (streamBody, context) => collectBodyString(streamBody, context).then((encoded) => {
  if (encoded.length) {
  return JSON.parse(encoded);
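The new `_xY` constants are simply the query and input member names factored out so the rewritten serializers can refer to them through short aliases; the behaviour is unchanged. A small equivalence sketch (the `input` literal here is a placeholder):

```ts
const _nT = "nextToken";
const input: any = { nextToken: "abc123" };

// The removed and added spellings build the same query entry:
const before = { nextToken: [, input.nextToken] };
const after = { [_nT]: [, input[_nT]] }; // computed key + bracket access resolve identically
```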
@@ -4,4 +4,4 @@ import { BedrockPaginationConfiguration } from "./Interfaces";
  /**
  * @public
  */
- export declare function paginateListCustomModels(config: BedrockPaginationConfiguration, input: ListCustomModelsCommandInput, ...additionalArguments: any): Paginator<ListCustomModelsCommandOutput>;
+ export declare const paginateListCustomModels: (config: BedrockPaginationConfiguration, input: ListCustomModelsCommandInput, ...rest: any[]) => Paginator<ListCustomModelsCommandOutput>;
@@ -4,4 +4,4 @@ import { BedrockPaginationConfiguration } from "./Interfaces";
  /**
  * @public
  */
- export declare function paginateListModelCustomizationJobs(config: BedrockPaginationConfiguration, input: ListModelCustomizationJobsCommandInput, ...additionalArguments: any): Paginator<ListModelCustomizationJobsCommandOutput>;
+ export declare const paginateListModelCustomizationJobs: (config: BedrockPaginationConfiguration, input: ListModelCustomizationJobsCommandInput, ...rest: any[]) => Paginator<ListModelCustomizationJobsCommandOutput>;