@aws-sdk/client-appflow 3.276.0 → 3.278.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,8 +18,7 @@ Slack, and ServiceNow, and Amazon Web Services like Amazon S3 and Amazon Redshif
18
18
  <ul>
19
19
  <li>
20
20
  <p>
21
- <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API
22
- operations.</p>
21
+ <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API operations.</p>
23
22
  </li>
24
23
  <li>
25
24
  <p>
@@ -36,7 +35,8 @@ types</a>: An alphabetical list of all Amazon AppFlow data types.</p>
36
35
  errors</a>: Client and server errors that all operations can return.</p>
37
36
  </li>
38
37
  </ul>
39
- <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow User Guide</a>.</p>
38
+ <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow
39
+ User Guide</a>.</p>
40
40
  <p>Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include
41
41
  applicable OAuth attributes (such as <code>auth-code</code> and <code>redirecturi</code>) with
42
42
  the connector-specific <code>ConnectorProfileProperties</code> when creating a new connector
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.ruleSet = void 0;
4
- const q = "fn", r = "argv", s = "ref";
5
- const a = true, b = false, c = "String", d = "PartitionResult", e = "tree", f = "error", g = "endpoint", h = { "required": true, "default": false, "type": "Boolean" }, i = { [s]: "Endpoint" }, j = { [q]: "booleanEquals", [r]: [{ [s]: "UseFIPS" }, true] }, k = { [q]: "booleanEquals", [r]: [{ [s]: "UseDualStack" }, true] }, l = {}, m = { [q]: "booleanEquals", [r]: [true, { [q]: "getAttr", [r]: [{ [s]: d }, "supportsFIPS"] }] }, n = { [q]: "booleanEquals", [r]: [true, { [q]: "getAttr", [r]: [{ [s]: d }, "supportsDualStack"] }] }, o = [j], p = [k];
6
- const _data = { version: "1.0", parameters: { Region: { required: a, type: c }, UseDualStack: h, UseFIPS: h, Endpoint: { required: b, type: c } }, rules: [{ conditions: [{ [q]: "aws.partition", [r]: [{ [s]: "Region" }], assign: d }], type: e, rules: [{ conditions: [{ [q]: "isSet", [r]: [i] }], type: e, rules: [{ conditions: o, error: "Invalid Configuration: FIPS and custom endpoint are not supported", type: f }, { type: e, rules: [{ conditions: p, error: "Invalid Configuration: Dualstack and custom endpoint are not supported", type: f }, { endpoint: { url: i, properties: l, headers: l }, type: g }] }] }, { conditions: [j, k], type: e, rules: [{ conditions: [m, n], type: e, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: l, headers: l }, type: g }] }, { error: "FIPS and DualStack are enabled, but this partition does not support one or both", type: f }] }, { conditions: o, type: e, rules: [{ conditions: [m], type: e, rules: [{ type: e, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", properties: l, headers: l }, type: g }] }] }, { error: "FIPS is enabled but this partition does not support FIPS", type: f }] }, { conditions: p, type: e, rules: [{ conditions: [n], type: e, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: l, headers: l }, type: g }] }, { error: "DualStack is enabled but this partition does not support DualStack", type: f }] }, { endpoint: { url: "https://appflow.{Region}.{PartitionResult#dnsSuffix}", properties: l, headers: l }, type: g }] }] };
4
+ const q = "required", r = "fn", s = "argv", t = "ref";
5
+ const a = "isSet", b = "tree", c = "error", d = "endpoint", e = "PartitionResult", f = { [q]: false, "type": "String" }, g = { [q]: true, "default": false, "type": "Boolean" }, h = { [t]: "Endpoint" }, i = { [r]: "booleanEquals", [s]: [{ [t]: "UseFIPS" }, true] }, j = { [r]: "booleanEquals", [s]: [{ [t]: "UseDualStack" }, true] }, k = {}, l = { [r]: "booleanEquals", [s]: [true, { [r]: "getAttr", [s]: [{ [t]: e }, "supportsFIPS"] }] }, m = { [r]: "booleanEquals", [s]: [true, { [r]: "getAttr", [s]: [{ [t]: e }, "supportsDualStack"] }] }, n = [i], o = [j], p = [{ [t]: "Region" }];
6
+ const _data = { version: "1.0", parameters: { Region: f, UseDualStack: g, UseFIPS: g, Endpoint: f }, rules: [{ conditions: [{ [r]: a, [s]: [h] }], type: b, rules: [{ conditions: n, error: "Invalid Configuration: FIPS and custom endpoint are not supported", type: c }, { type: b, rules: [{ conditions: o, error: "Invalid Configuration: Dualstack and custom endpoint are not supported", type: c }, { endpoint: { url: h, properties: k, headers: k }, type: d }] }] }, { type: b, rules: [{ conditions: [{ [r]: a, [s]: p }], type: b, rules: [{ conditions: [{ [r]: "aws.partition", [s]: p, assign: e }], type: b, rules: [{ conditions: [i, j], type: b, rules: [{ conditions: [l, m], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "FIPS and DualStack are enabled, but this partition does not support one or both", type: c }] }, { conditions: n, type: b, rules: [{ conditions: [l], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "FIPS is enabled but this partition does not support FIPS", type: c }] }, { conditions: o, type: b, rules: [{ conditions: [m], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "DualStack is enabled but this partition does not support DualStack", type: c }] }, { type: b, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dnsSuffix}", properties: k, headers: k }, type: d }] }] }] }, { error: "Invalid Configuration: Missing Region", type: c }] }] };
7
7
  exports.ruleSet = _data;
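The regenerated ruleset above keeps the same endpoint logic but now treats Region as an optional parameter and adds an explicit "Invalid Configuration: Missing Region" error rule. The sketch below only mirrors the URL templates encoded in that data for the standard aws partition (the dnsSuffix "amazonaws.com" and dualStackDnsSuffix "api.aws" are assumptions here); it is not the SDK's endpoint resolver, and custom endpoints and partition capability checks are left out.

// Hypothetical helper mirroring the URL templates in the ruleset; not the SDK resolver.
interface SketchEndpointParams {
  Region?: string;
  UseFIPS?: boolean;
  UseDualStack?: boolean;
}

function sketchAppflowEndpoint({ Region, UseFIPS, UseDualStack }: SketchEndpointParams): string {
  if (!Region) {
    // New rule in 3.278.0: the ruleset errors here instead of requiring Region up front.
    throw new Error("Invalid Configuration: Missing Region");
  }
  // Assumed suffixes for the public "aws" partition; other partitions differ.
  const dnsSuffix = "amazonaws.com";
  const dualStackDnsSuffix = "api.aws";
  const host = UseFIPS ? "appflow-fips" : "appflow";
  return `https://${host}.${Region}.${UseDualStack ? dualStackDnsSuffix : dnsSuffix}`;
}

// sketchAppflowEndpoint({ Region: "us-east-1", UseFIPS: true })
//   -> "https://appflow-fips.us-east-1.amazonaws.com"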
@@ -2425,6 +2425,9 @@ const serializeAws_restJson1SalesforceConnectorProfileProperties = (input, conte
2425
2425
  return {
2426
2426
  ...(input.instanceUrl != null && { instanceUrl: input.instanceUrl }),
2427
2427
  ...(input.isSandboxEnvironment != null && { isSandboxEnvironment: input.isSandboxEnvironment }),
2428
+ ...(input.usePrivateLinkForMetadataAndAuthorization != null && {
2429
+ usePrivateLinkForMetadataAndAuthorization: input.usePrivateLinkForMetadataAndAuthorization,
2430
+ }),
2428
2431
  };
2429
2432
  };
2430
2433
  const serializeAws_restJson1SalesforceDestinationProperties = (input, context) => {
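The serializer change above follows the conditional-spread pattern the generated protocol code uses throughout: an optional member is included in the JSON request body only when it is neither null nor undefined. A minimal standalone sketch of that pattern (the types and values are illustrative, not the generated code itself):

// Conditional spread: `...(cond && { key: value })` adds `key` only when `cond` is truthy.
const serializeSalesforceProps = (input: {
  instanceUrl?: string;
  isSandboxEnvironment?: boolean;
  usePrivateLinkForMetadataAndAuthorization?: boolean;
}) => ({
  ...(input.instanceUrl != null && { instanceUrl: input.instanceUrl }),
  ...(input.isSandboxEnvironment != null && { isSandboxEnvironment: input.isSandboxEnvironment }),
  ...(input.usePrivateLinkForMetadataAndAuthorization != null && {
    usePrivateLinkForMetadataAndAuthorization: input.usePrivateLinkForMetadataAndAuthorization,
  }),
});

// serializeSalesforceProps({ instanceUrl: "https://example.my.salesforce.com" })
//   -> { instanceUrl: "https://example.my.salesforce.com" }  (unset booleans are omitted)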
@@ -3874,6 +3877,7 @@ const deserializeAws_restJson1SalesforceConnectorProfileProperties = (output, co
3874
3877
  return {
3875
3878
  instanceUrl: (0, smithy_client_1.expectString)(output.instanceUrl),
3876
3879
  isSandboxEnvironment: (0, smithy_client_1.expectBoolean)(output.isSandboxEnvironment),
3880
+ usePrivateLinkForMetadataAndAuthorization: (0, smithy_client_1.expectBoolean)(output.usePrivateLinkForMetadataAndAuthorization),
3877
3881
  };
3878
3882
  };
3879
3883
  const deserializeAws_restJson1SalesforceDataTransferApiList = (output, context) => {
@@ -1,4 +1,4 @@
1
- const q = "fn", r = "argv", s = "ref";
2
- const a = true, b = false, c = "String", d = "PartitionResult", e = "tree", f = "error", g = "endpoint", h = { "required": true, "default": false, "type": "Boolean" }, i = { [s]: "Endpoint" }, j = { [q]: "booleanEquals", [r]: [{ [s]: "UseFIPS" }, true] }, k = { [q]: "booleanEquals", [r]: [{ [s]: "UseDualStack" }, true] }, l = {}, m = { [q]: "booleanEquals", [r]: [true, { [q]: "getAttr", [r]: [{ [s]: d }, "supportsFIPS"] }] }, n = { [q]: "booleanEquals", [r]: [true, { [q]: "getAttr", [r]: [{ [s]: d }, "supportsDualStack"] }] }, o = [j], p = [k];
3
- const _data = { version: "1.0", parameters: { Region: { required: a, type: c }, UseDualStack: h, UseFIPS: h, Endpoint: { required: b, type: c } }, rules: [{ conditions: [{ [q]: "aws.partition", [r]: [{ [s]: "Region" }], assign: d }], type: e, rules: [{ conditions: [{ [q]: "isSet", [r]: [i] }], type: e, rules: [{ conditions: o, error: "Invalid Configuration: FIPS and custom endpoint are not supported", type: f }, { type: e, rules: [{ conditions: p, error: "Invalid Configuration: Dualstack and custom endpoint are not supported", type: f }, { endpoint: { url: i, properties: l, headers: l }, type: g }] }] }, { conditions: [j, k], type: e, rules: [{ conditions: [m, n], type: e, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: l, headers: l }, type: g }] }, { error: "FIPS and DualStack are enabled, but this partition does not support one or both", type: f }] }, { conditions: o, type: e, rules: [{ conditions: [m], type: e, rules: [{ type: e, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", properties: l, headers: l }, type: g }] }] }, { error: "FIPS is enabled but this partition does not support FIPS", type: f }] }, { conditions: p, type: e, rules: [{ conditions: [n], type: e, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: l, headers: l }, type: g }] }, { error: "DualStack is enabled but this partition does not support DualStack", type: f }] }, { endpoint: { url: "https://appflow.{Region}.{PartitionResult#dnsSuffix}", properties: l, headers: l }, type: g }] }] };
1
+ const q = "required", r = "fn", s = "argv", t = "ref";
2
+ const a = "isSet", b = "tree", c = "error", d = "endpoint", e = "PartitionResult", f = { [q]: false, "type": "String" }, g = { [q]: true, "default": false, "type": "Boolean" }, h = { [t]: "Endpoint" }, i = { [r]: "booleanEquals", [s]: [{ [t]: "UseFIPS" }, true] }, j = { [r]: "booleanEquals", [s]: [{ [t]: "UseDualStack" }, true] }, k = {}, l = { [r]: "booleanEquals", [s]: [true, { [r]: "getAttr", [s]: [{ [t]: e }, "supportsFIPS"] }] }, m = { [r]: "booleanEquals", [s]: [true, { [r]: "getAttr", [s]: [{ [t]: e }, "supportsDualStack"] }] }, n = [i], o = [j], p = [{ [t]: "Region" }];
3
+ const _data = { version: "1.0", parameters: { Region: f, UseDualStack: g, UseFIPS: g, Endpoint: f }, rules: [{ conditions: [{ [r]: a, [s]: [h] }], type: b, rules: [{ conditions: n, error: "Invalid Configuration: FIPS and custom endpoint are not supported", type: c }, { type: b, rules: [{ conditions: o, error: "Invalid Configuration: Dualstack and custom endpoint are not supported", type: c }, { endpoint: { url: h, properties: k, headers: k }, type: d }] }] }, { type: b, rules: [{ conditions: [{ [r]: a, [s]: p }], type: b, rules: [{ conditions: [{ [r]: "aws.partition", [s]: p, assign: e }], type: b, rules: [{ conditions: [i, j], type: b, rules: [{ conditions: [l, m], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "FIPS and DualStack are enabled, but this partition does not support one or both", type: c }] }, { conditions: n, type: b, rules: [{ conditions: [l], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "FIPS is enabled but this partition does not support FIPS", type: c }] }, { conditions: o, type: b, rules: [{ conditions: [m], type: b, rules: [{ type: b, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", properties: k, headers: k }, type: d }] }] }, { error: "DualStack is enabled but this partition does not support DualStack", type: c }] }, { type: b, rules: [{ endpoint: { url: "https://appflow.{Region}.{PartitionResult#dnsSuffix}", properties: k, headers: k }, type: d }] }] }] }, { error: "Invalid Configuration: Missing Region", type: c }] }] };
4
4
  export const ruleSet = _data;
@@ -2376,6 +2376,9 @@ const serializeAws_restJson1SalesforceConnectorProfileProperties = (input, conte
2376
2376
  return {
2377
2377
  ...(input.instanceUrl != null && { instanceUrl: input.instanceUrl }),
2378
2378
  ...(input.isSandboxEnvironment != null && { isSandboxEnvironment: input.isSandboxEnvironment }),
2379
+ ...(input.usePrivateLinkForMetadataAndAuthorization != null && {
2380
+ usePrivateLinkForMetadataAndAuthorization: input.usePrivateLinkForMetadataAndAuthorization,
2381
+ }),
2379
2382
  };
2380
2383
  };
2381
2384
  const serializeAws_restJson1SalesforceDestinationProperties = (input, context) => {
@@ -3825,6 +3828,7 @@ const deserializeAws_restJson1SalesforceConnectorProfileProperties = (output, co
3825
3828
  return {
3826
3829
  instanceUrl: __expectString(output.instanceUrl),
3827
3830
  isSandboxEnvironment: __expectBoolean(output.isSandboxEnvironment),
3831
+ usePrivateLinkForMetadataAndAuthorization: __expectBoolean(output.usePrivateLinkForMetadataAndAuthorization),
3828
3832
  };
3829
3833
  };
3830
3834
  const deserializeAws_restJson1SalesforceDataTransferApiList = (output, context) => {
@@ -33,8 +33,7 @@ import { UpdateFlowCommandInput, UpdateFlowCommandOutput } from "./commands/Upda
33
33
  * <ul>
34
34
  * <li>
35
35
  * <p>
36
- * <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API
37
- * operations.</p>
36
+ * <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API operations.</p>
38
37
  * </li>
39
38
  * <li>
40
39
  * <p>
@@ -51,7 +50,8 @@ import { UpdateFlowCommandInput, UpdateFlowCommandOutput } from "./commands/Upda
51
50
  * errors</a>: Client and server errors that all operations can return.</p>
52
51
  * </li>
53
52
  * </ul>
54
- * <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow User Guide</a>.</p>
53
+ * <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow
54
+ * User Guide</a>.</p>
55
55
  * <p>Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include
56
56
  * applicable OAuth attributes (such as <code>auth-code</code> and <code>redirecturi</code>) with
57
57
  * the connector-specific <code>ConnectorProfileProperties</code> when creating a new connector
@@ -62,9 +62,10 @@ import { UpdateFlowCommandInput, UpdateFlowCommandOutput } from "./commands/Upda
62
62
  */
63
63
  export declare class Appflow extends AppflowClient {
64
64
  /**
65
- * <p> Creates a new connector profile associated with your Amazon Web Services account. There
66
- * is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
67
- * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support channel. In each connector profile that you
65
+ * <p> Creates a new connector profile associated with your Amazon Web Services account. There is
66
+ * a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
67
+ * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow
68
+ * team through the Amazon AppFlow support channel. In each connector profile that you
68
69
  * create, you can provide the credentials and properties for only one connector.</p>
69
70
  */
70
71
  createConnectorProfile(args: CreateConnectorProfileCommandInput, options?: __HttpHandlerOptions): Promise<CreateConnectorProfileCommandOutput>;
@@ -155,8 +155,7 @@ export interface AppflowClientResolvedConfig extends AppflowClientResolvedConfig
155
155
  * <ul>
156
156
  * <li>
157
157
  * <p>
158
- * <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API
159
- * operations.</p>
158
+ * <a href="https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html">Actions</a>: An alphabetical list of all Amazon AppFlow API operations.</p>
160
159
  * </li>
161
160
  * <li>
162
161
  * <p>
@@ -173,7 +172,8 @@ export interface AppflowClientResolvedConfig extends AppflowClientResolvedConfig
173
172
  * errors</a>: Client and server errors that all operations can return.</p>
174
173
  * </li>
175
174
  * </ul>
176
- * <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow User Guide</a>.</p>
175
+ * <p>If you're new to Amazon AppFlow, we recommend that you review the <a href="https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html">Amazon AppFlow
176
+ * User Guide</a>.</p>
177
177
  * <p>Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include
178
178
  * applicable OAuth attributes (such as <code>auth-code</code> and <code>redirecturi</code>) with
179
179
  * the connector-specific <code>ConnectorProfileProperties</code> when creating a new connector
@@ -8,9 +8,10 @@ export interface CreateConnectorProfileCommandInput extends CreateConnectorProfi
8
8
  export interface CreateConnectorProfileCommandOutput extends CreateConnectorProfileResponse, __MetadataBearer {
9
9
  }
10
10
  /**
11
- * <p> Creates a new connector profile associated with your Amazon Web Services account. There
12
- * is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
13
- * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support channel. In each connector profile that you
11
+ * <p> Creates a new connector profile associated with your Amazon Web Services account. There is
12
+ * a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
13
+ * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow
14
+ * team through the Amazon AppFlow support channel. In each connector profile that you
14
15
  * create, you can provide the credentials and properties for only one connector.</p>
15
16
  * @example
16
17
  * Use a bare-bones client and the command you need to make an API call.
@@ -12,7 +12,7 @@ export declare const resolveClientEndpointParameters: <T>(options: T & ClientInp
12
12
  defaultSigningName: string;
13
13
  };
14
14
  export interface EndpointParameters extends __EndpointParameters {
15
- Region: string;
15
+ Region?: string;
16
16
  UseDualStack?: boolean;
17
17
  UseFIPS?: boolean;
18
18
  Endpoint?: string;
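Changing Region from required to optional only loosens the compile-time shape of EndpointParameters; at request time a region must still be resolvable from client config, the environment, or shared config, otherwise the new "Invalid Configuration: Missing Region" rule in the endpoint ruleset fires. A minimal usage sketch, assuming credentials are available in the environment and an ESM/top-level-await context:

import { AppflowClient, ListFlowsCommand } from "@aws-sdk/client-appflow";

// Supplying the region explicitly keeps endpoint resolution deterministic.
const client = new AppflowClient({ region: "us-east-1" });

const { flows } = await client.send(new ListFlowsCommand({}));
console.log(flows?.map((flow) => flow.flowName));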
@@ -1448,7 +1448,8 @@ export interface PardotConnectorProfileProperties {
1448
1448
  */
1449
1449
  instanceUrl?: string;
1450
1450
  /**
1451
- * <p>Indicates whether the connector profile applies to a sandbox or production environment.</p>
1451
+ * <p>Indicates whether the connector profile applies to a sandbox or production
1452
+ * environment.</p>
1452
1453
  */
1453
1454
  isSandboxEnvironment?: boolean;
1454
1455
  /**
@@ -1516,6 +1517,55 @@ export interface SalesforceConnectorProfileProperties {
1516
1517
  * </p>
1517
1518
  */
1518
1519
  isSandboxEnvironment?: boolean;
1520
+ /**
1521
+ * <p>If the connection mode for the connector profile is private, this parameter sets whether
1522
+ * Amazon AppFlow uses the private network to send metadata and authorization calls to
1523
+ * Salesforce. Amazon AppFlow sends private calls through Amazon Web Services PrivateLink. These
1524
+ * calls travel through Amazon Web Services infrastructure without being exposed to the public
1525
+ * internet.</p>
1526
+ * <p>Set either of the following values:</p>
1527
+ * <dl>
1528
+ * <dt>true</dt>
1529
+ * <dd>
1530
+ * <p>Amazon AppFlow sends all calls to Salesforce over the private network.</p>
1531
+ * <p>These private calls are:</p>
1532
+ * <ul>
1533
+ * <li>
1534
+ * <p>Calls to get metadata about your Salesforce records. This metadata describes
1535
+ * your Salesforce objects and their fields.</p>
1536
+ * </li>
1537
+ * <li>
1538
+ * <p>Calls to get or refresh access tokens that allow Amazon AppFlow to access
1539
+ * your Salesforce records.</p>
1540
+ * </li>
1541
+ * <li>
1542
+ * <p>Calls to transfer your Salesforce records as part of a flow run.</p>
1543
+ * </li>
1544
+ * </ul>
1545
+ * </dd>
1546
+ * <dt>false</dt>
1547
+ * <dd>
1548
+ * <p>The default value. Amazon AppFlow sends some calls to Salesforce privately and
1549
+ * other calls over the public internet.</p>
1550
+ * <p>The public calls are: </p>
1551
+ * <ul>
1552
+ * <li>
1553
+ * <p>Calls to get metadata about your Salesforce records.</p>
1554
+ * </li>
1555
+ * <li>
1556
+ * <p>Calls to get or refresh access tokens.</p>
1557
+ * </li>
1558
+ * </ul>
1559
+ * <p>The private calls are:</p>
1560
+ * <ul>
1561
+ * <li>
1562
+ * <p>Calls to transfer your Salesforce records as part of a flow run.</p>
1563
+ * </li>
1564
+ * </ul>
1565
+ * </dd>
1566
+ * </dl>
1567
+ */
1568
+ usePrivateLinkForMetadataAndAuthorization?: boolean;
1519
1569
  }
1520
1570
  /**
1521
1571
  * <p> The OAuth properties required for OAuth type authentication. </p>
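The new usePrivateLinkForMetadataAndAuthorization member documented above is surfaced on SalesforceConnectorProfileProperties and, per the doc comment, only matters for profiles whose connection mode is private. Below is a hedged sketch of creating such a profile with the flag enabled (ESM/top-level await assumed); the profile name, instance URL, and Secrets Manager ARN are placeholders, and the surrounding request shape reflects the existing AppFlow CreateConnectorProfile API rather than anything new in this release:

import { AppflowClient, CreateConnectorProfileCommand } from "@aws-sdk/client-appflow";

const client = new AppflowClient({ region: "us-east-1" });

await client.send(
  new CreateConnectorProfileCommand({
    connectorProfileName: "example-salesforce-private", // placeholder
    connectorType: "Salesforce",
    connectionMode: "Private", // the flag only applies to private connections
    connectorProfileConfig: {
      connectorProfileProperties: {
        Salesforce: {
          instanceUrl: "https://example.my.salesforce.com", // placeholder
          usePrivateLinkForMetadataAndAuthorization: true, // new in 3.278.0
        },
      },
      connectorProfileCredentials: {
        Salesforce: {
          // Placeholder ARN for OAuth client credentials stored in Secrets Manager.
          clientCredentialsArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:example",
        },
      },
    },
  })
);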
@@ -2017,8 +2067,7 @@ export interface PardotConnectorProfileCredentials {
2017
2067
  clientCredentialsArn?: string;
2018
2068
  }
2019
2069
  /**
2020
- * <p> The connector-specific profile credentials required when using Amazon Redshift.
2021
- * </p>
2070
+ * <p> The connector-specific profile credentials required when using Amazon Redshift. </p>
2022
2071
  */
2023
2072
  export interface RedshiftConnectorProfileCredentials {
2024
2073
  /**
@@ -2379,9 +2428,9 @@ export declare class ValidationException extends __BaseException {
2379
2428
  constructor(opts: __ExceptionOptionType<ValidationException, __BaseException>);
2380
2429
  }
2381
2430
  /**
2382
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2383
- * in the destination. For example, this setting would determine if the flow should fail after
2384
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2431
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2432
+ * the destination. For example, this setting would determine if the flow should fail after one
2433
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2385
2434
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2386
2435
  * </p>
2387
2436
  */
@@ -2455,9 +2504,9 @@ export interface EventBridgeDestinationProperties {
2455
2504
  */
2456
2505
  object: string | undefined;
2457
2506
  /**
2458
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2459
- * in the destination. For example, this setting would determine if the flow should fail after
2460
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2507
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2508
+ * the destination. For example, this setting would determine if the flow should fail after one
2509
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2461
2510
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2462
2511
  * </p>
2463
2512
  */
@@ -2472,9 +2521,9 @@ export interface HoneycodeDestinationProperties {
2472
2521
  */
2473
2522
  object: string | undefined;
2474
2523
  /**
2475
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2476
- * in the destination. For example, this setting would determine if the flow should fail after
2477
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2524
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2525
+ * the destination. For example, this setting would determine if the flow should fail after one
2526
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2478
2527
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2479
2528
  * </p>
2480
2529
  */
@@ -2496,9 +2545,9 @@ export interface MarketoDestinationProperties {
2496
2545
  */
2497
2546
  object: string | undefined;
2498
2547
  /**
2499
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2500
- * in the destination. For example, this setting would determine if the flow should fail after
2501
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2548
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2549
+ * the destination. For example, this setting would determine if the flow should fail after one
2550
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2502
2551
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2503
2552
  * </p>
2504
2553
  */
@@ -2523,8 +2572,8 @@ export interface RedshiftDestinationProperties {
2523
2572
  */
2524
2573
  bucketPrefix?: string;
2525
2574
  /**
2526
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2527
- * in the Amazon Redshift destination. For example, this setting would determine if the flow
2575
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2576
+ * the Amazon Redshift destination. For example, this setting would determine if the flow
2528
2577
  * should fail after one insertion error, or continue and attempt to insert every record
2529
2578
  * regardless of the initial failure. <code>ErrorHandlingConfig</code> is a part of the
2530
2579
  * destination connector details. </p>
@@ -2602,8 +2651,8 @@ export interface PrefixConfig {
2602
2651
  */
2603
2652
  export interface S3OutputFormatConfig {
2604
2653
  /**
2605
- * <p> Indicates the file type that Amazon AppFlow places in the Amazon S3
2606
- * bucket. </p>
2654
+ * <p> Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.
2655
+ * </p>
2607
2656
  */
2608
2657
  fileType?: FileType | string;
2609
2658
  /**
@@ -2668,10 +2717,10 @@ export interface SalesforceDestinationProperties {
2668
2717
  */
2669
2718
  idFieldNames?: string[];
2670
2719
  /**
2671
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2672
- * in the Salesforce destination. For example, this setting would determine if the flow should
2673
- * fail after one insertion error, or continue and attempt to insert every record regardless of
2674
- * the initial failure. <code>ErrorHandlingConfig</code> is a part of the destination connector
2720
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2721
+ * the Salesforce destination. For example, this setting would determine if the flow should fail
2722
+ * after one insertion error, or continue and attempt to insert every record regardless of the
2723
+ * initial failure. <code>ErrorHandlingConfig</code> is a part of the destination connector
2675
2724
  * details. </p>
2676
2725
  */
2677
2726
  errorHandlingConfig?: ErrorHandlingConfig;
@@ -2758,9 +2807,9 @@ export interface SAPODataDestinationProperties {
2758
2807
  */
2759
2808
  idFieldNames?: string[];
2760
2809
  /**
2761
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2762
- * in the destination. For example, this setting would determine if the flow should fail after
2763
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2810
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2811
+ * the destination. For example, this setting would determine if the flow should fail after one
2812
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2764
2813
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2765
2814
  * </p>
2766
2815
  */
@@ -2790,10 +2839,10 @@ export interface SnowflakeDestinationProperties {
2790
2839
  */
2791
2840
  bucketPrefix?: string;
2792
2841
  /**
2793
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2794
- * in the Snowflake destination. For example, this setting would determine if the flow should
2795
- * fail after one insertion error, or continue and attempt to insert every record regardless of
2796
- * the initial failure. <code>ErrorHandlingConfig</code> is a part of the destination connector
2842
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2843
+ * the Snowflake destination. For example, this setting would determine if the flow should fail
2844
+ * after one insertion error, or continue and attempt to insert every record regardless of the
2845
+ * initial failure. <code>ErrorHandlingConfig</code> is a part of the destination connector
2797
2846
  * details. </p>
2798
2847
  */
2799
2848
  errorHandlingConfig?: ErrorHandlingConfig;
@@ -2804,7 +2853,8 @@ export interface SnowflakeDestinationProperties {
2804
2853
  */
2805
2854
  export interface UpsolverS3OutputFormatConfig {
2806
2855
  /**
2807
- * <p> Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket. </p>
2856
+ * <p> Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3
2857
+ * bucket. </p>
2808
2858
  */
2809
2859
  fileType?: FileType | string;
2810
2860
  /**
@@ -2851,9 +2901,9 @@ export interface ZendeskDestinationProperties {
2851
2901
  */
2852
2902
  idFieldNames?: string[];
2853
2903
  /**
2854
- * <p> The settings that determine how Amazon AppFlow handles an error when placing data
2855
- * in the destination. For example, this setting would determine if the flow should fail after
2856
- * one insertion error, or continue and attempt to insert every record regardless of the initial
2904
+ * <p> The settings that determine how Amazon AppFlow handles an error when placing data in
2905
+ * the destination. For example, this setting would determine if the flow should fail after one
2906
+ * insertion error, or continue and attempt to insert every record regardless of the initial
2857
2907
  * failure. <code>ErrorHandlingConfig</code> is a part of the destination connector details.
2858
2908
  * </p>
2859
2909
  */
@@ -2946,9 +2996,9 @@ export interface DestinationFlowConfig {
2946
2996
  }
2947
2997
  /**
2948
2998
  * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs your data with
2949
- * the Glue Data Catalog. When Amazon AppFlow catalogs your data, it stores
2950
- * metadata in Data Catalog tables. This metadata represents the data that's transferred
2951
- * by the flow that you configure with these settings.</p>
2999
+ * the Glue Data Catalog. When Amazon AppFlow catalogs your data, it stores metadata
3000
+ * in Data Catalog tables. This metadata represents the data that's transferred by the
3001
+ * flow that you configure with these settings.</p>
2952
3002
  * <note>
2953
3003
  * <p>You can configure a flow with these settings only when the flow destination is Amazon S3.</p>
2954
3004
  * </note>
@@ -2962,10 +3012,8 @@ export interface GlueDataCatalogConfig {
2962
3012
  */
2963
3013
  roleArn: string | undefined;
2964
3014
  /**
2965
- * <p>The name of the Data Catalog database that stores the metadata tables that
2966
- * Amazon AppFlow creates in your Amazon Web Services account. These tables contain
2967
- * metadata for the data that's transferred by the flow that you configure with this
2968
- * parameter.</p>
3015
+ * <p>The name of the Data Catalog database that stores the metadata tables that Amazon AppFlow creates in your Amazon Web Services account. These tables contain metadata for
3016
+ * the data that's transferred by the flow that you configure with this parameter.</p>
2969
3017
  * <note>
2970
3018
  * <p>When you configure a new flow with this parameter, you must specify an existing
2971
3019
  * database.</p>
@@ -2980,8 +3028,8 @@ export interface GlueDataCatalogConfig {
2980
3028
  tablePrefix: string | undefined;
2981
3029
  }
2982
3030
  /**
2983
- * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs your data.
2984
- * When Amazon AppFlow catalogs your data, it stores metadata in a data catalog.</p>
3031
+ * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs your data. When
3032
+ * Amazon AppFlow catalogs your data, it stores metadata in a data catalog.</p>
2985
3033
  */
2986
3034
  export interface MetadataCatalogConfig {
2987
3035
  /**
@@ -3517,8 +3565,8 @@ export interface CreateFlowRequest {
3517
3565
  */
3518
3566
  destinationFlowConfigList: DestinationFlowConfig[] | undefined;
3519
3567
  /**
3520
- * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the
3521
- * flow run. </p>
3568
+ * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the flow
3569
+ * run. </p>
3522
3570
  */
3523
3571
  tasks: Task[] | undefined;
3524
3572
  /**
@@ -3526,9 +3574,9 @@ export interface CreateFlowRequest {
3526
3574
  */
3527
3575
  tags?: Record<string, string>;
3528
3576
  /**
3529
- * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data
3530
- * that's transferred by the associated flow. When Amazon AppFlow catalogs the data from a
3531
- * flow, it stores metadata in a data catalog.</p>
3577
+ * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data that's
3578
+ * transferred by the associated flow. When Amazon AppFlow catalogs the data from a flow, it
3579
+ * stores metadata in a data catalog.</p>
3532
3580
  */
3533
3581
  metadataCatalogConfig?: MetadataCatalogConfig;
3534
3582
  }
@@ -3733,8 +3781,7 @@ export interface ExecutionDetails {
3733
3781
  mostRecentExecutionStatus?: ExecutionStatus | string;
3734
3782
  }
3735
3783
  /**
3736
- * <p>Describes the status of an attempt from Amazon AppFlow to register a
3737
- * resource.</p>
3784
+ * <p>Describes the status of an attempt from Amazon AppFlow to register a resource.</p>
3738
3785
  * <p>When you run a flow that you've configured to use a metadata catalog, Amazon AppFlow
3739
3786
  * registers a metadata table and data partitions with that catalog. This operation provides the
3740
3787
  * status of that registration attempt. The operation also indicates how many related resources
@@ -3766,15 +3813,16 @@ export interface MetadataCatalogDetail {
3766
3813
  * <dl>
3767
3814
  * <dt>GLUE</dt>
3768
3815
  * <dd>
3769
- * <p>The metadata catalog is provided by the Glue Data Catalog. Glue includes the Glue Data Catalog as a component.</p>
3816
+ * <p>The metadata catalog is provided by the Glue Data Catalog. Glue
3817
+ * includes the Glue Data Catalog as a component.</p>
3770
3818
  * </dd>
3771
3819
  * </dl>
3772
3820
  */
3773
3821
  catalogType?: CatalogType | string;
3774
3822
  /**
3775
3823
  * <p>The name of the table that stores the metadata for the associated flow run. The table
3776
- * stores metadata that represents the data that the flow transferred. Amazon AppFlow
3777
- * stores the table in the metadata catalog.</p>
3824
+ * stores metadata that represents the data that the flow transferred. Amazon AppFlow stores
3825
+ * the table in the metadata catalog.</p>
3778
3826
  */
3779
3827
  tableName?: string;
3780
3828
  /**
@@ -3826,8 +3874,8 @@ export interface DescribeFlowResponse {
3826
3874
  */
3827
3875
  sourceFlowConfig?: SourceFlowConfig;
3828
3876
  /**
3829
- * <p> The configuration that controls how Amazon AppFlow transfers data to the
3830
- * destination connector. </p>
3877
+ * <p> The configuration that controls how Amazon AppFlow transfers data to the destination
3878
+ * connector. </p>
3831
3879
  */
3832
3880
  destinationFlowConfigList?: DestinationFlowConfig[];
3833
3881
  /**
@@ -3839,8 +3887,8 @@ export interface DescribeFlowResponse {
3839
3887
  */
3840
3888
  triggerConfig?: TriggerConfig;
3841
3889
  /**
3842
- * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the
3843
- * flow run. </p>
3890
+ * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the flow
3891
+ * run. </p>
3844
3892
  */
3845
3893
  tasks?: Task[];
3846
3894
  /**
@@ -3864,9 +3912,9 @@ export interface DescribeFlowResponse {
3864
3912
  */
3865
3913
  tags?: Record<string, string>;
3866
3914
  /**
3867
- * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data
3868
- * that's transferred by the associated flow. When Amazon AppFlow catalogs the data from a
3869
- * flow, it stores metadata in a data catalog.</p>
3915
+ * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data that's
3916
+ * transferred by the associated flow. When Amazon AppFlow catalogs the data from a flow, it
3917
+ * stores metadata in a data catalog.</p>
3870
3918
  */
3871
3919
  metadataCatalogConfig?: MetadataCatalogConfig;
3872
3920
  /**
@@ -4353,19 +4401,19 @@ export interface UpdateFlowRequest {
4353
4401
  */
4354
4402
  sourceFlowConfig: SourceFlowConfig | undefined;
4355
4403
  /**
4356
- * <p> The configuration that controls how Amazon AppFlow transfers data to the
4357
- * destination connector. </p>
4404
+ * <p> The configuration that controls how Amazon AppFlow transfers data to the destination
4405
+ * connector. </p>
4358
4406
  */
4359
4407
  destinationFlowConfigList: DestinationFlowConfig[] | undefined;
4360
4408
  /**
4361
- * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the
4362
- * flow run. </p>
4409
+ * <p> A list of tasks that Amazon AppFlow performs while transferring the data in the flow
4410
+ * run. </p>
4363
4411
  */
4364
4412
  tasks: Task[] | undefined;
4365
4413
  /**
4366
- * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data
4367
- * that's transferred by the associated flow. When Amazon AppFlow catalogs the data from a
4368
- * flow, it stores metadata in a data catalog.</p>
4414
+ * <p>Specifies the configuration that Amazon AppFlow uses when it catalogs the data that's
4415
+ * transferred by the associated flow. When Amazon AppFlow catalogs the data from a flow, it
4416
+ * stores metadata in a data catalog.</p>
4369
4417
  */
4370
4418
  metadataCatalogConfig?: MetadataCatalogConfig;
4371
4419
  }
@@ -27,7 +27,7 @@ export declare const resolveClientEndpointParameters: <T>(
27
27
  defaultSigningName: string;
28
28
  };
29
29
  export interface EndpointParameters extends __EndpointParameters {
30
- Region: string;
30
+ Region?: string;
31
31
  UseDualStack?: boolean;
32
32
  UseFIPS?: boolean;
33
33
  Endpoint?: string;
@@ -704,6 +704,7 @@ export interface RedshiftConnectorProfileProperties {
704
704
  export interface SalesforceConnectorProfileProperties {
705
705
  instanceUrl?: string;
706
706
  isSandboxEnvironment?: boolean;
707
+ usePrivateLinkForMetadataAndAuthorization?: boolean;
707
708
  }
708
709
  export interface OAuthProperties {
709
710
  tokenUrl: string | undefined;
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@aws-sdk/client-appflow",
3
3
  "description": "AWS SDK for JavaScript Appflow Client for Node.js, Browser and React Native",
4
- "version": "3.276.0",
4
+ "version": "3.278.0",
5
5
  "scripts": {
6
6
  "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
7
7
  "build:cjs": "tsc -p tsconfig.cjs.json",
@@ -20,15 +20,15 @@
20
20
  "dependencies": {
21
21
  "@aws-crypto/sha256-browser": "3.0.0",
22
22
  "@aws-crypto/sha256-js": "3.0.0",
23
- "@aws-sdk/client-sts": "3.276.0",
23
+ "@aws-sdk/client-sts": "3.278.0",
24
24
  "@aws-sdk/config-resolver": "3.272.0",
25
- "@aws-sdk/credential-provider-node": "3.272.0",
25
+ "@aws-sdk/credential-provider-node": "3.278.0",
26
26
  "@aws-sdk/fetch-http-handler": "3.272.0",
27
27
  "@aws-sdk/hash-node": "3.272.0",
28
28
  "@aws-sdk/invalid-dependency": "3.272.0",
29
29
  "@aws-sdk/middleware-content-length": "3.272.0",
30
30
  "@aws-sdk/middleware-endpoint": "3.272.0",
31
- "@aws-sdk/middleware-host-header": "3.272.0",
31
+ "@aws-sdk/middleware-host-header": "3.278.0",
32
32
  "@aws-sdk/middleware-logger": "3.272.0",
33
33
  "@aws-sdk/middleware-recursion-detection": "3.272.0",
34
34
  "@aws-sdk/middleware-retry": "3.272.0",