aws-sdk-neptunedata 1.0.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,2958 @@
1
+ # frozen_string_literal: true
2
+
3
+ # WARNING ABOUT GENERATED CODE
4
+ #
5
+ # This file is generated. See the contributing guide for more information:
6
+ # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
7
+ #
8
+ # WARNING ABOUT GENERATED CODE
9
+
10
+ require 'seahorse/client/plugins/content_length.rb'
11
+ require 'aws-sdk-core/plugins/credentials_configuration.rb'
12
+ require 'aws-sdk-core/plugins/logging.rb'
13
+ require 'aws-sdk-core/plugins/param_converter.rb'
14
+ require 'aws-sdk-core/plugins/param_validator.rb'
15
+ require 'aws-sdk-core/plugins/user_agent.rb'
16
+ require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
17
+ require 'aws-sdk-core/plugins/retry_errors.rb'
18
+ require 'aws-sdk-core/plugins/global_configuration.rb'
19
+ require 'aws-sdk-core/plugins/regional_endpoint.rb'
20
+ require 'aws-sdk-core/plugins/endpoint_discovery.rb'
21
+ require 'aws-sdk-core/plugins/endpoint_pattern.rb'
22
+ require 'aws-sdk-core/plugins/response_paging.rb'
23
+ require 'aws-sdk-core/plugins/stub_responses.rb'
24
+ require 'aws-sdk-core/plugins/idempotency_token.rb'
25
+ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
26
+ require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
27
+ require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
28
+ require 'aws-sdk-core/plugins/transfer_encoding.rb'
29
+ require 'aws-sdk-core/plugins/http_checksum.rb'
30
+ require 'aws-sdk-core/plugins/checksum_algorithm.rb'
31
+ require 'aws-sdk-core/plugins/request_compression.rb'
32
+ require 'aws-sdk-core/plugins/defaults_mode.rb'
33
+ require 'aws-sdk-core/plugins/recursion_detection.rb'
34
+ require 'aws-sdk-core/plugins/sign.rb'
35
+ require 'aws-sdk-core/plugins/protocols/rest_json.rb'
36
+
37
+ Aws::Plugins::GlobalConfiguration.add_identifier(:neptunedata)
38
+
39
+ module Aws::Neptunedata
40
+ # An API client for Neptunedata. To construct a client, you need to configure a `:region` and `:credentials`.
41
+ #
42
+ # client = Aws::Neptunedata::Client.new(
43
+ # region: region_name,
44
+ # credentials: credentials,
45
+ # # ...
46
+ # )
47
+ #
48
+ # For details on configuring region and credentials see
49
+ # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
50
+ #
51
+ # See {#initialize} for a full list of supported configuration options.
52
+ class Client < Seahorse::Client::Base
53
+
54
+ include Aws::ClientStubs
55
+
56
+ @identifier = :neptunedata
57
+
58
+ set_api(ClientApi::API)
59
+
60
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
61
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
62
+ add_plugin(Aws::Plugins::Logging)
63
+ add_plugin(Aws::Plugins::ParamConverter)
64
+ add_plugin(Aws::Plugins::ParamValidator)
65
+ add_plugin(Aws::Plugins::UserAgent)
66
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
67
+ add_plugin(Aws::Plugins::RetryErrors)
68
+ add_plugin(Aws::Plugins::GlobalConfiguration)
69
+ add_plugin(Aws::Plugins::RegionalEndpoint)
70
+ add_plugin(Aws::Plugins::EndpointDiscovery)
71
+ add_plugin(Aws::Plugins::EndpointPattern)
72
+ add_plugin(Aws::Plugins::ResponsePaging)
73
+ add_plugin(Aws::Plugins::StubResponses)
74
+ add_plugin(Aws::Plugins::IdempotencyToken)
75
+ add_plugin(Aws::Plugins::JsonvalueConverter)
76
+ add_plugin(Aws::Plugins::ClientMetricsPlugin)
77
+ add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
78
+ add_plugin(Aws::Plugins::TransferEncoding)
79
+ add_plugin(Aws::Plugins::HttpChecksum)
80
+ add_plugin(Aws::Plugins::ChecksumAlgorithm)
81
+ add_plugin(Aws::Plugins::RequestCompression)
82
+ add_plugin(Aws::Plugins::DefaultsMode)
83
+ add_plugin(Aws::Plugins::RecursionDetection)
84
+ add_plugin(Aws::Plugins::Sign)
85
+ add_plugin(Aws::Plugins::Protocols::RestJson)
86
+ add_plugin(Aws::Neptunedata::Plugins::Endpoints)
87
+
88
+ # @overload initialize(options)
89
+ # @param [Hash] options
90
+ # @option options [required, Aws::CredentialProvider] :credentials
91
+ # Your AWS credentials. This can be an instance of any one of the
92
+ # following classes:
93
+ #
94
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
95
+ # credentials.
96
+ #
97
+ # * `Aws::SharedCredentials` - Used for loading static credentials from a
98
+ # shared file, such as `~/.aws/config`.
99
+ #
100
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
101
+ #
102
+ # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
103
+ # assume a role after providing credentials via the web.
104
+ #
105
+ # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
106
+ # access token generated from `aws login`.
107
+ #
108
+ # * `Aws::ProcessCredentials` - Used for loading credentials from a
109
+ # process that outputs to stdout.
110
+ #
111
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
112
+ # from an EC2 IMDS on an EC2 instance.
113
+ #
114
+ # * `Aws::ECSCredentials` - Used for loading credentials from
115
+ # instances running in ECS.
116
+ #
117
+ # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
118
+ # from the Cognito Identity service.
119
+ #
120
+ # When `:credentials` are not configured directly, the following
121
+ # locations will be searched for credentials:
122
+ #
123
+ # * `Aws.config[:credentials]`
124
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
125
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
126
+ # * `~/.aws/credentials`
127
+ # * `~/.aws/config`
128
+ # * EC2/ECS IMDS instance profile - When used by default, the timeouts
129
+ # are very aggressive. Construct and pass an instance of
130
+ #       `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
131
+ # enable retries and extended timeouts. Instance profile credential
132
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
133
+ # to true.
134
+ #
135
+ # @option options [required, String] :region
136
+ # The AWS region to connect to. The configured `:region` is
137
+ # used to determine the service `:endpoint`. When not passed,
138
+ # a default `:region` is searched for in the following locations:
139
+ #
140
+ # * `Aws.config[:region]`
141
+ # * `ENV['AWS_REGION']`
142
+ # * `ENV['AMAZON_REGION']`
143
+ # * `ENV['AWS_DEFAULT_REGION']`
144
+ # * `~/.aws/credentials`
145
+ # * `~/.aws/config`
146
+ #
147
+ # @option options [String] :access_key_id
148
+ #
149
+ # @option options [Boolean] :active_endpoint_cache (false)
150
+ # When set to `true`, a thread polling for endpoints will be running in
151
+ # the background every 60 secs (default). Defaults to `false`.
152
+ #
153
+ # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
154
+ # Used only in `adaptive` retry mode. When true, the request will sleep
155
+ #     until there is sufficient client side capacity to retry the request.
156
+ # When false, the request will raise a `RetryCapacityNotAvailableError` and will
157
+ # not retry instead of sleeping.
158
+ #
159
+ # @option options [Boolean] :client_side_monitoring (false)
160
+ # When `true`, client-side metrics will be collected for all API requests from
161
+ # this client.
162
+ #
163
+ # @option options [String] :client_side_monitoring_client_id ("")
164
+ # Allows you to provide an identifier for this client which will be attached to
165
+ # all generated client side metrics. Defaults to an empty string.
166
+ #
167
+ # @option options [String] :client_side_monitoring_host ("127.0.0.1")
168
+ # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
169
+ # side monitoring agent is running on, where client metrics will be published via UDP.
170
+ #
171
+ # @option options [Integer] :client_side_monitoring_port (31000)
172
+ # Required for publishing client metrics. The port that the client side monitoring
173
+ # agent is running on, where client metrics will be published via UDP.
174
+ #
175
+ # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
176
+ # Allows you to provide a custom client-side monitoring publisher class. By default,
177
+ # will use the Client Side Monitoring Agent Publisher.
178
+ #
179
+ # @option options [Boolean] :convert_params (true)
180
+ # When `true`, an attempt is made to coerce request parameters into
181
+ # the required types.
182
+ #
183
+ # @option options [Boolean] :correct_clock_skew (true)
184
+ # Used only in `standard` and adaptive retry modes. Specifies whether to apply
185
+ # a clock skew correction and retry requests with skewed client clocks.
186
+ #
187
+ # @option options [String] :defaults_mode ("legacy")
188
+ # See {Aws::DefaultsModeConfiguration} for a list of the
189
+ # accepted modes and the configuration defaults that are included.
190
+ #
191
+ # @option options [Boolean] :disable_host_prefix_injection (false)
192
+ # Set to true to disable SDK automatically adding host prefix
193
+ # to default service endpoint when available.
194
+ #
195
+ # @option options [Boolean] :disable_request_compression (false)
196
+ # When set to 'true' the request body will not be compressed
197
+ # for supported operations.
198
+ #
199
+ # @option options [String] :endpoint
200
+ # The client endpoint is normally constructed from the `:region`
201
+ # option. You should only configure an `:endpoint` when connecting
202
+ # to test or custom endpoints. This should be a valid HTTP(S) URI.
203
+ #
204
+ # @option options [Integer] :endpoint_cache_max_entries (1000)
205
+ # Used for the maximum size limit of the LRU cache storing endpoints data
206
+ # for endpoint discovery enabled operations. Defaults to 1000.
207
+ #
208
+ # @option options [Integer] :endpoint_cache_max_threads (10)
209
+ # Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
210
+ #
211
+ # @option options [Integer] :endpoint_cache_poll_interval (60)
212
+ # When :endpoint_discovery and :active_endpoint_cache is enabled,
213
+ # Use this option to config the time interval in seconds for making
214
+ # requests fetching endpoints information. Defaults to 60 sec.
215
+ #
216
+ # @option options [Boolean] :endpoint_discovery (false)
217
+ # When set to `true`, endpoint discovery will be enabled for operations when available.
218
+ #
219
+ # @option options [Boolean] :ignore_configured_endpoint_urls
220
+ # Setting to true disables use of endpoint URLs provided via environment
221
+ # variables and the shared configuration file.
222
+ #
223
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
224
+ # The log formatter.
225
+ #
226
+ # @option options [Symbol] :log_level (:info)
227
+ # The log level to send messages to the `:logger` at.
228
+ #
229
+ # @option options [Logger] :logger
230
+ # The Logger instance to send log messages to. If this option
231
+ # is not set, logging will be disabled.
232
+ #
233
+ # @option options [Integer] :max_attempts (3)
234
+ # An integer representing the maximum number attempts that will be made for
235
+ # a single request, including the initial attempt. For example,
236
+ # setting this value to 5 will result in a request being retried up to
237
+ # 4 times. Used in `standard` and `adaptive` retry modes.
238
+ #
239
+ # @option options [String] :profile ("default")
240
+ # Used when loading credentials from the shared credentials file
241
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
242
+ #
243
+ # @option options [Integer] :request_min_compression_size_bytes (10240)
244
+ # The minimum size in bytes that triggers compression for request
245
+ # bodies. The value must be non-negative integer value between 0
246
+ # and 10485780 bytes inclusive.
247
+ #
248
+ # @option options [Proc] :retry_backoff
249
+ # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
250
+ # This option is only used in the `legacy` retry mode.
251
+ #
252
+ # @option options [Float] :retry_base_delay (0.3)
253
+ # The base delay in seconds used by the default backoff function. This option
254
+ # is only used in the `legacy` retry mode.
255
+ #
256
+ # @option options [Symbol] :retry_jitter (:none)
257
+ # A delay randomiser function used by the default backoff function.
258
+ # Some predefined functions can be referenced by name - :none, :equal, :full,
259
+ # otherwise a Proc that takes and returns a number. This option is only used
260
+ # in the `legacy` retry mode.
261
+ #
262
+ # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
263
+ #
264
+ # @option options [Integer] :retry_limit (3)
265
+ # The maximum number of times to retry failed requests. Only
266
+ # ~ 500 level server errors and certain ~ 400 level client errors
267
+ # are retried. Generally, these are throttling errors, data
268
+ # checksum errors, networking errors, timeout errors, auth errors,
269
+ # endpoint discovery, and errors from expired credentials.
270
+ # This option is only used in the `legacy` retry mode.
271
+ #
272
+ # @option options [Integer] :retry_max_delay (0)
273
+ # The maximum number of seconds to delay between retries (0 for no limit)
274
+ # used by the default backoff function. This option is only used in the
275
+ # `legacy` retry mode.
276
+ #
277
+ # @option options [String] :retry_mode ("legacy")
278
+ # Specifies which retry algorithm to use. Values are:
279
+ #
280
+ # * `legacy` - The pre-existing retry behavior. This is default value if
281
+ # no retry mode is provided.
282
+ #
283
+ # * `standard` - A standardized set of retry rules across the AWS SDKs.
284
+ # This includes support for retry quotas, which limit the number of
285
+ # unsuccessful retries a client can make.
286
+ #
287
+ # * `adaptive` - An experimental retry mode that includes all the
288
+ # functionality of `standard` mode along with automatic client side
289
+ # throttling. This is a provisional mode that may change behavior
290
+ # in the future.
291
+ #
292
+ #
293
+ # @option options [String] :sdk_ua_app_id
294
+ # A unique and opaque application ID that is appended to the
295
+ # User-Agent header as app/<sdk_ua_app_id>. It should have a
296
+ # maximum length of 50.
297
+ #
298
+ # @option options [String] :secret_access_key
299
+ #
300
+ # @option options [String] :session_token
301
+ #
302
+ # @option options [Boolean] :stub_responses (false)
303
+ # Causes the client to return stubbed responses. By default
304
+ # fake responses are generated and returned. You can specify
305
+ # the response data to return or errors to raise by calling
306
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
307
+ #
308
+ # ** Please note ** When response stubbing is enabled, no HTTP
309
+ # requests are made, and retries are disabled.
310
+ #
311
+ # @option options [Aws::TokenProvider] :token_provider
312
+ # A Bearer Token Provider. This can be an instance of any one of the
313
+ # following classes:
314
+ #
315
+ # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
316
+ # tokens.
317
+ #
318
+ # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
319
+ # access token generated from `aws login`.
320
+ #
321
+ # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
322
+ # will be used to search for tokens configured for your profile in shared configuration files.
323
+ #
324
+ # @option options [Boolean] :use_dualstack_endpoint
325
+ # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
326
+ # will be used if available.
327
+ #
328
+ # @option options [Boolean] :use_fips_endpoint
329
+ # When set to `true`, fips compatible endpoints will be used if available.
330
+ # When a `fips` region is used, the region is normalized and this config
331
+ # is set to `true`.
332
+ #
333
+ # @option options [Boolean] :validate_params (true)
334
+ # When `true`, request parameters are validated before
335
+ # sending the request.
336
+ #
337
+ # @option options [Aws::Neptunedata::EndpointProvider] :endpoint_provider
338
+ # The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::Neptunedata::EndpointParameters`
339
+ #
340
+ # @option options [URI::HTTP,String] :http_proxy A proxy to send
341
+ # requests through. Formatted like 'http://proxy.com:123'.
342
+ #
343
+ # @option options [Float] :http_open_timeout (15) The number of
344
+ # seconds to wait when opening a HTTP session before raising a
345
+ # `Timeout::Error`.
346
+ #
347
+ # @option options [Float] :http_read_timeout (60) The default
348
+ # number of seconds to wait for response data. This value can
349
+ # safely be set per-request on the session.
350
+ #
351
+ # @option options [Float] :http_idle_timeout (5) The number of
352
+ # seconds a connection is allowed to sit idle before it is
353
+ # considered stale. Stale connections are closed and removed
354
+ # from the pool before making a request.
355
+ #
356
+ # @option options [Float] :http_continue_timeout (1) The number of
357
+ # seconds to wait for a 100-continue response before sending the
358
+ # request body. This option has no effect unless the request has
359
+ # "Expect" header set to "100-continue". Defaults to `nil` which
360
+ # disables this behaviour. This value can safely be set per
361
+ # request on the session.
362
+ #
363
+ # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
364
+ # in seconds.
365
+ #
366
+ # @option options [Boolean] :http_wire_trace (false) When `true`,
367
+ # HTTP debug output will be sent to the `:logger`.
368
+ #
369
+ # @option options [Boolean] :ssl_verify_peer (true) When `true`,
370
+ # SSL peer certificates are verified when establishing a
371
+ # connection.
372
+ #
373
+ # @option options [String] :ssl_ca_bundle Full path to the SSL
374
+ # certificate authority bundle file that should be used when
375
+ # verifying peer certificates. If you do not pass
376
+ #     `:ssl_ca_bundle` or `:ssl_ca_directory`, the system default
377
+ # will be used if available.
378
+ #
379
+ # @option options [String] :ssl_ca_directory Full path of the
380
+ # directory that contains the unbundled SSL certificate
381
+ # authority files for verifying peer certificates. If you do
382
+ #     not pass `:ssl_ca_bundle` or `:ssl_ca_directory`, the
383
+ # system default will be used if available.
384
+ #
385
    # Constructs the client. All configuration-option processing is
    # delegated to Seahorse::Client::Base#initialize via `super`; see
    # the @option documentation above for the supported options.
    def initialize(*args)
      super
    end
388
+
389
+ # @!group API Operations
390
+
391
+ # Cancels a Gremlin query. See [Gremlin query cancellation][1] for more
392
+ # information.
393
+ #
394
+ #
395
+ #
396
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-api-status-cancel.html
397
+ #
398
+ # @option params [required, String] :query_id
399
+ # The unique identifier that identifies the query to be canceled.
400
+ #
401
+ # @return [Types::CancelGremlinQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
402
+ #
403
+ # * {Types::CancelGremlinQueryOutput#status #status} => String
404
+ #
405
+ # @example Request syntax with placeholder values
406
+ #
407
+ # resp = client.cancel_gremlin_query({
408
+ # query_id: "String", # required
409
+ # })
410
+ #
411
+ # @example Response structure
412
+ #
413
+ # resp.status #=> String
414
+ #
415
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelGremlinQuery AWS API Documentation
416
+ #
417
+ # @overload cancel_gremlin_query(params = {})
418
+ # @param [Hash] params ({})
419
+ def cancel_gremlin_query(params = {}, options = {})
420
+ req = build_request(:cancel_gremlin_query, params)
421
+ req.send_request(options)
422
+ end
423
+
424
+ # Cancels a specified load job. This is an HTTP `DELETE` request.
425
+ #
426
+ # See [Neptune Loader Get-Status API][1] for more information.
427
+ #
428
+ #
429
+ #
430
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/load-api-reference-status.html
431
+ #
432
+ # @option params [required, String] :load_id
433
+ # The ID of the load job to be deleted.
434
+ #
435
+ # @return [Types::CancelLoaderJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
436
+ #
437
+ # * {Types::CancelLoaderJobOutput#status #status} => String
438
+ #
439
+ # @example Request syntax with placeholder values
440
+ #
441
+ # resp = client.cancel_loader_job({
442
+ # load_id: "String", # required
443
+ # })
444
+ #
445
+ # @example Response structure
446
+ #
447
+ # resp.status #=> String
448
+ #
449
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelLoaderJob AWS API Documentation
450
+ #
451
+ # @overload cancel_loader_job(params = {})
452
+ # @param [Hash] params ({})
453
+ def cancel_loader_job(params = {}, options = {})
454
+ req = build_request(:cancel_loader_job, params)
455
+ req.send_request(options)
456
+ end
457
+
458
+ # Cancels a Neptune ML data processing job. See [The `dataprocessing`
459
+ # command][1].
460
+ #
461
+ #
462
+ #
463
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html
464
+ #
465
+ # @option params [required, String] :id
466
+ # The unique identifier of the data-processing job.
467
+ #
468
+ # @option params [String] :neptune_iam_role_arn
469
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
470
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
471
+ # group or an error will occur.
472
+ #
473
+ # @option params [Boolean] :clean
474
+ # If set to `TRUE`, this flag specifies that all Neptune ML S3 artifacts
475
+ # should be deleted when the job is stopped. The default is `FALSE`.
476
+ #
477
+ # @return [Types::CancelMLDataProcessingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
478
+ #
479
+ # * {Types::CancelMLDataProcessingJobOutput#status #status} => String
480
+ #
481
+ # @example Request syntax with placeholder values
482
+ #
483
+ # resp = client.cancel_ml_data_processing_job({
484
+ # id: "String", # required
485
+ # neptune_iam_role_arn: "String",
486
+ # clean: false,
487
+ # })
488
+ #
489
+ # @example Response structure
490
+ #
491
+ # resp.status #=> String
492
+ #
493
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelMLDataProcessingJob AWS API Documentation
494
+ #
495
+ # @overload cancel_ml_data_processing_job(params = {})
496
+ # @param [Hash] params ({})
497
+ def cancel_ml_data_processing_job(params = {}, options = {})
498
+ req = build_request(:cancel_ml_data_processing_job, params)
499
+ req.send_request(options)
500
+ end
501
+
502
+ # Cancels a Neptune ML model training job. See [Model training using the
503
+ # `modeltraining` command][1].
504
+ #
505
+ #
506
+ #
507
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-modeltraining.html
508
+ #
509
+ # @option params [required, String] :id
510
+ # The unique identifier of the model-training job to be canceled.
511
+ #
512
+ # @option params [String] :neptune_iam_role_arn
513
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
514
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
515
+ # group or an error will occur.
516
+ #
517
+ # @option params [Boolean] :clean
518
+ # If set to `TRUE`, this flag specifies that all Amazon S3 artifacts
519
+ # should be deleted when the job is stopped. The default is `FALSE`.
520
+ #
521
+ # @return [Types::CancelMLModelTrainingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
522
+ #
523
+ # * {Types::CancelMLModelTrainingJobOutput#status #status} => String
524
+ #
525
+ # @example Request syntax with placeholder values
526
+ #
527
+ # resp = client.cancel_ml_model_training_job({
528
+ # id: "String", # required
529
+ # neptune_iam_role_arn: "String",
530
+ # clean: false,
531
+ # })
532
+ #
533
+ # @example Response structure
534
+ #
535
+ # resp.status #=> String
536
+ #
537
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelMLModelTrainingJob AWS API Documentation
538
+ #
539
+ # @overload cancel_ml_model_training_job(params = {})
540
+ # @param [Hash] params ({})
541
+ def cancel_ml_model_training_job(params = {}, options = {})
542
+ req = build_request(:cancel_ml_model_training_job, params)
543
+ req.send_request(options)
544
+ end
545
+
546
+ # Cancels a specified model transform job. See [Use a trained model to
547
+ # generate new model artifacts][1].
548
+ #
549
+ #
550
+ #
551
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-model-transform.html
552
+ #
553
+ # @option params [required, String] :id
554
+ # The unique ID of the model transform job to be canceled.
555
+ #
556
+ # @option params [String] :neptune_iam_role_arn
557
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
558
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
559
+ # group or an error will occur.
560
+ #
561
+ # @option params [Boolean] :clean
562
+ # If this flag is set to `TRUE`, all Neptune ML S3 artifacts should be
563
+ # deleted when the job is stopped. The default is `FALSE`.
564
+ #
565
+ # @return [Types::CancelMLModelTransformJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
566
+ #
567
+ # * {Types::CancelMLModelTransformJobOutput#status #status} => String
568
+ #
569
+ # @example Request syntax with placeholder values
570
+ #
571
+ # resp = client.cancel_ml_model_transform_job({
572
+ # id: "String", # required
573
+ # neptune_iam_role_arn: "String",
574
+ # clean: false,
575
+ # })
576
+ #
577
+ # @example Response structure
578
+ #
579
+ # resp.status #=> String
580
+ #
581
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelMLModelTransformJob AWS API Documentation
582
+ #
583
+ # @overload cancel_ml_model_transform_job(params = {})
584
+ # @param [Hash] params ({})
585
+ def cancel_ml_model_transform_job(params = {}, options = {})
586
+ req = build_request(:cancel_ml_model_transform_job, params)
587
+ req.send_request(options)
588
+ end
589
+
590
+ # Cancels a specified openCypher query. See [Neptune openCypher status
591
+ # endpoint][1] for more information.
592
+ #
593
+ #
594
+ #
595
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-opencypher-status.html
596
+ #
597
+ # @option params [required, String] :query_id
598
+ # The unique ID of the openCypher query to cancel.
599
+ #
600
+ # @option params [Boolean] :silent
601
+ # If set to `TRUE`, causes the cancelation of the openCypher query to
602
+ # happen silently.
603
+ #
604
+ # @return [Types::CancelOpenCypherQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
605
+ #
606
+ # * {Types::CancelOpenCypherQueryOutput#status #status} => String
607
+ # * {Types::CancelOpenCypherQueryOutput#payload #payload} => Boolean
608
+ #
609
+ # @example Request syntax with placeholder values
610
+ #
611
+ # resp = client.cancel_open_cypher_query({
612
+ # query_id: "String", # required
613
+ # silent: false,
614
+ # })
615
+ #
616
+ # @example Response structure
617
+ #
618
+ # resp.status #=> String
619
+ # resp.payload #=> Boolean
620
+ #
621
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CancelOpenCypherQuery AWS API Documentation
622
+ #
623
+ # @overload cancel_open_cypher_query(params = {})
624
+ # @param [Hash] params ({})
625
+ def cancel_open_cypher_query(params = {}, options = {})
626
+ req = build_request(:cancel_open_cypher_query, params)
627
+ req.send_request(options)
628
+ end
629
+
630
+ # Creates a new Neptune ML inference endpoint that lets you query one
631
+ # specific model that the model-training process constructed. See
632
+ # [Managing inference endpoints using the endpoints command][1].
633
+ #
634
+ #
635
+ #
636
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-endpoints.html
637
+ #
638
+ # @option params [String] :id
639
+ # A unique identifier for the new inference endpoint. The default is an
640
+ # autogenerated timestamped name.
641
+ #
642
+ # @option params [String] :ml_model_training_job_id
643
+ # The job Id of the completed model-training job that has created the
644
+ # model that the inference endpoint will point to. You must supply
645
+ # either the `mlModelTrainingJobId` or the `mlModelTransformJobId`.
646
+ #
647
+ # @option params [String] :ml_model_transform_job_id
648
+ # The job Id of the completed model-transform job. You must supply
649
+ # either the `mlModelTrainingJobId` or the `mlModelTransformJobId`.
650
+ #
651
+ # @option params [Boolean] :update
652
+ # If set to `true`, `update` indicates that this is an update request.
653
+ # The default is `false`. You must supply either the
654
+ # `mlModelTrainingJobId` or the `mlModelTransformJobId`.
655
+ #
656
+ # @option params [String] :neptune_iam_role_arn
657
+ # The ARN of an IAM role providing Neptune access to SageMaker and
658
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
659
+ # group or an error will be thrown.
660
+ #
661
+ # @option params [String] :model_name
662
+ # Model type for training. By default the Neptune ML model is
663
+ # automatically based on the `modelType` used in data processing, but
664
+ # you can specify a different model type here. The default is `rgcn` for
665
+ # heterogeneous graphs and `kge` for knowledge graphs. The only valid
666
+ # value for heterogeneous graphs is `rgcn`. Valid values for knowledge
667
+ # graphs are: `kge`, `transe`, `distmult`, and `rotate`.
668
+ #
669
+ # @option params [String] :instance_type
670
+ # The type of Neptune ML instance to use for online servicing. The
671
+ # default is `ml.m5.xlarge`. Choosing the ML instance for an inference
672
+ # endpoint depends on the task type, the graph size, and your budget.
673
+ #
674
+ # @option params [Integer] :instance_count
675
+ # The minimum number of Amazon EC2 instances to deploy to an endpoint
676
+ # for prediction. The default is 1
677
+ #
678
+ # @option params [String] :volume_encryption_kms_key
679
+ # The Amazon Key Management Service (Amazon KMS) key that SageMaker uses
680
+ # to encrypt data on the storage volume attached to the ML compute
681
+ # instances that run the training job. The default is None.
682
+ #
683
+ # @return [Types::CreateMLEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
684
+ #
685
+ # * {Types::CreateMLEndpointOutput#id #id} => String
686
+ # * {Types::CreateMLEndpointOutput#arn #arn} => String
687
+ # * {Types::CreateMLEndpointOutput#creation_time_in_millis #creation_time_in_millis} => Integer
688
+ #
689
+ # @example Request syntax with placeholder values
690
+ #
691
+ # resp = client.create_ml_endpoint({
692
+ # id: "String",
693
+ # ml_model_training_job_id: "String",
694
+ # ml_model_transform_job_id: "String",
695
+ # update: false,
696
+ # neptune_iam_role_arn: "String",
697
+ # model_name: "String",
698
+ # instance_type: "String",
699
+ # instance_count: 1,
700
+ # volume_encryption_kms_key: "String",
701
+ # })
702
+ #
703
+ # @example Response structure
704
+ #
705
+ # resp.id #=> String
706
+ # resp.arn #=> String
707
+ # resp.creation_time_in_millis #=> Integer
708
+ #
709
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/CreateMLEndpoint AWS API Documentation
710
+ #
711
+ # @overload create_ml_endpoint(params = {})
712
+ # @param [Hash] params ({})
713
# Builds the CreateMLEndpoint request from +params+ and dispatches it.
def create_ml_endpoint(params = {}, options = {})
  build_request(:create_ml_endpoint, params).send_request(options)
end
717
+
718
+ # Cancels the creation of a Neptune ML inference endpoint. See [Managing
719
+ # inference endpoints using the endpoints command][1].
720
+ #
721
+ #
722
+ #
723
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-endpoints.html
724
+ #
725
+ # @option params [required, String] :id
726
+ # The unique identifier of the inference endpoint.
727
+ #
728
+ # @option params [String] :neptune_iam_role_arn
729
+ # The ARN of an IAM role providing Neptune access to SageMaker and
730
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
731
+ # group or an error will be thrown.
732
+ #
733
+ # @option params [Boolean] :clean
734
+ # If this flag is set to `TRUE`, all Neptune ML S3 artifacts should be
735
+ # deleted when the job is stopped. The default is `FALSE`.
736
+ #
737
+ # @return [Types::DeleteMLEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
738
+ #
739
+ # * {Types::DeleteMLEndpointOutput#status #status} => String
740
+ #
741
+ # @example Request syntax with placeholder values
742
+ #
743
+ # resp = client.delete_ml_endpoint({
744
+ # id: "String", # required
745
+ # neptune_iam_role_arn: "String",
746
+ # clean: false,
747
+ # })
748
+ #
749
+ # @example Response structure
750
+ #
751
+ # resp.status #=> String
752
+ #
753
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/DeleteMLEndpoint AWS API Documentation
754
+ #
755
+ # @overload delete_ml_endpoint(params = {})
756
+ # @param [Hash] params ({})
757
# Builds the DeleteMLEndpoint request from +params+ and dispatches it.
def delete_ml_endpoint(params = {}, options = {})
  build_request(:delete_ml_endpoint, params).send_request(options)
end
761
+
762
+ # Deletes statistics for Gremlin and openCypher (property graph) data.
763
+ #
764
+ # @return [Types::DeletePropertygraphStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
765
+ #
766
+ # * {Types::DeletePropertygraphStatisticsOutput#status_code #status_code} => Integer
767
+ # * {Types::DeletePropertygraphStatisticsOutput#status #status} => String
768
+ # * {Types::DeletePropertygraphStatisticsOutput#payload #payload} => Types::DeleteStatisticsValueMap
769
+ #
770
+ # @example Response structure
771
+ #
772
+ # resp.status_code #=> Integer
773
+ # resp.status #=> String
774
+ # resp.payload.active #=> Boolean
775
+ # resp.payload.statistics_id #=> String
776
+ #
777
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/DeletePropertygraphStatistics AWS API Documentation
778
+ #
779
+ # @overload delete_propertygraph_statistics(params = {})
780
+ # @param [Hash] params ({})
781
# Builds the DeletePropertygraphStatistics request and dispatches it.
def delete_propertygraph_statistics(params = {}, options = {})
  build_request(:delete_propertygraph_statistics, params).send_request(options)
end
785
+
786
+ # Deletes SPARQL statistics
787
+ #
788
+ # @return [Types::DeleteSparqlStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
789
+ #
790
+ # * {Types::DeleteSparqlStatisticsOutput#status_code #status_code} => Integer
791
+ # * {Types::DeleteSparqlStatisticsOutput#status #status} => String
792
+ # * {Types::DeleteSparqlStatisticsOutput#payload #payload} => Types::DeleteStatisticsValueMap
793
+ #
794
+ # @example Response structure
795
+ #
796
+ # resp.status_code #=> Integer
797
+ # resp.status #=> String
798
+ # resp.payload.active #=> Boolean
799
+ # resp.payload.statistics_id #=> String
800
+ #
801
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/DeleteSparqlStatistics AWS API Documentation
802
+ #
803
+ # @overload delete_sparql_statistics(params = {})
804
+ # @param [Hash] params ({})
805
# Builds the DeleteSparqlStatistics request and dispatches it.
def delete_sparql_statistics(params = {}, options = {})
  build_request(:delete_sparql_statistics, params).send_request(options)
end
809
+
810
+ # The fast reset REST API lets you reset a Neptune graph quickly and
811
+ # easily, removing all of its data.
812
+ #
813
+ # Neptune fast reset is a two-step process. First you call
814
+ # `ExecuteFastReset` with `action` set to `initiateDatabaseReset`. This
815
+ # returns a UUID token which you then include when calling
816
+ # `ExecuteFastReset` again with `action` set to `performDatabaseReset`.
817
+ # See [Empty an Amazon Neptune DB cluster using the fast reset API][1].
818
+ #
819
+ #
820
+ #
821
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/manage-console-fast-reset.html
822
+ #
823
+ # @option params [required, String] :action
824
+ # The fast reset action. One of the following values:
825
+ #
826
+ # * <b> <code>initiateDatabaseReset</code> </b>   –   This action
827
+ # generates a unique token needed to actually perform the fast reset.
828
+ #
829
+ # * <b> <code>performDatabaseReset</code> </b>   –   This action uses
830
+ # the token generated by the `initiateDatabaseReset` action to
831
+ # actually perform the fast reset.
832
+ #
833
+ # @option params [String] :token
834
+ # The fast-reset token to initiate the reset.
835
+ #
836
+ # @return [Types::ExecuteFastResetOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
837
+ #
838
+ # * {Types::ExecuteFastResetOutput#status #status} => String
839
+ # * {Types::ExecuteFastResetOutput#payload #payload} => Types::FastResetToken
840
+ #
841
+ # @example Request syntax with placeholder values
842
+ #
843
+ # resp = client.execute_fast_reset({
844
+ # action: "initiateDatabaseReset", # required, accepts initiateDatabaseReset, performDatabaseReset
845
+ # token: "String",
846
+ # })
847
+ #
848
+ # @example Response structure
849
+ #
850
+ # resp.status #=> String
851
+ # resp.payload.token #=> String
852
+ #
853
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteFastReset AWS API Documentation
854
+ #
855
+ # @overload execute_fast_reset(params = {})
856
+ # @param [Hash] params ({})
857
# Builds the ExecuteFastReset request from +params+ and dispatches it.
def execute_fast_reset(params = {}, options = {})
  build_request(:execute_fast_reset, params).send_request(options)
end
861
+
862
+ # Executes a Gremlin Explain query.
863
+ #
864
+ # Amazon Neptune has added a Gremlin feature named `explain` that
865
+ # provides a self-service tool for understanding the execution
866
+ # approach being taken by the Neptune engine for the query. You invoke
867
+ # it by adding an `explain` parameter to an HTTP call that submits a
868
+ # Gremlin query.
869
+ #
870
+ # The explain feature provides information about the logical structure
871
+ # of query execution plans. You can use this information to identify
872
+ # potential evaluation and execution bottlenecks and to tune your query,
873
+ # as explained in [Tuning Gremlin queries][1]. You can also use query
874
+ # hints to improve query execution plans.
875
+ #
876
+ #
877
+ #
878
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-traversal-tuning.html
879
+ #
880
+ # @option params [required, String] :gremlin_query
881
+ # The Gremlin explain query string.
882
+ #
883
+ # @return [Types::ExecuteGremlinExplainQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
884
+ #
885
+ # * {Types::ExecuteGremlinExplainQueryOutput#output #output} => String
886
+ #
887
+ # @example Request syntax with placeholder values
888
+ #
889
+ # resp = client.execute_gremlin_explain_query({
890
+ # gremlin_query: "String", # required
891
+ # })
892
+ #
893
+ # @example Response structure
894
+ #
895
+ # resp.output #=> String
896
+ #
897
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteGremlinExplainQuery AWS API Documentation
898
+ #
899
+ # @overload execute_gremlin_explain_query(params = {})
900
+ # @param [Hash] params ({})
901
# Builds the ExecuteGremlinExplainQuery request and dispatches it.
def execute_gremlin_explain_query(params = {}, options = {})
  build_request(:execute_gremlin_explain_query, params).send_request(options)
end
905
+
906
+ # Executes a Gremlin Profile query, which runs a specified traversal,
907
+ # collects various metrics about the run, and produces a profile report
908
+ # as output. See [Gremlin profile API in Neptune][1] for details.
909
+ #
910
+ #
911
+ #
912
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-profile-api.html
913
+ #
914
+ # @option params [required, String] :gremlin_query
915
+ # The Gremlin query string to profile.
916
+ #
917
+ # @option params [Boolean] :results
918
+ # If this flag is set to `TRUE`, the query results are gathered and
919
+ # displayed as part of the profile report. If `FALSE`, only the result
920
+ # count is displayed.
921
+ #
922
+ # @option params [Integer] :chop
923
+ # If non-zero, causes the results string to be truncated at that number
924
+ # of characters. If set to zero, the string contains all the results.
925
+ #
926
+ # @option params [String] :serializer
927
+ # If non-null, the gathered results are returned in a serialized
928
+ # response message in the format specified by this parameter. See
929
+ # [Gremlin profile API in Neptune][1] for more information.
930
+ #
931
+ #
932
+ #
933
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-profile-api.html
934
+ #
935
+ # @option params [Boolean] :index_ops
936
+ # If this flag is set to `TRUE`, the results include a detailed report
937
+ # of all index operations that took place during query execution and
938
+ # serialization.
939
+ #
940
+ # @return [Types::ExecuteGremlinProfileQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
941
+ #
942
+ # * {Types::ExecuteGremlinProfileQueryOutput#output #output} => String
943
+ #
944
+ # @example Request syntax with placeholder values
945
+ #
946
+ # resp = client.execute_gremlin_profile_query({
947
+ # gremlin_query: "String", # required
948
+ # results: false,
949
+ # chop: 1,
950
+ # serializer: "String",
951
+ # index_ops: false,
952
+ # })
953
+ #
954
+ # @example Response structure
955
+ #
956
+ # resp.output #=> String
957
+ #
958
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteGremlinProfileQuery AWS API Documentation
959
+ #
960
+ # @overload execute_gremlin_profile_query(params = {})
961
+ # @param [Hash] params ({})
962
# Builds the ExecuteGremlinProfileQuery request and dispatches it.
def execute_gremlin_profile_query(params = {}, options = {})
  build_request(:execute_gremlin_profile_query, params).send_request(options)
end
966
+
967
+ # This command executes a Gremlin query. Amazon Neptune is compatible
968
+ # with Apache TinkerPop3 and Gremlin, so you can use the Gremlin
969
+ # traversal language to query the graph, as described under [The
970
+ # Graph][1] in the Apache TinkerPop3 documentation. More details can
971
+ # also be found in [Accessing a Neptune graph with Gremlin][2].
972
+ #
973
+ #
974
+ #
975
+ # [1]: https://tinkerpop.apache.org/docs/current/reference/#graph
976
+ # [2]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-gremlin.html
977
+ #
978
+ # @option params [required, String] :gremlin_query
979
+ # Using this API, you can run Gremlin queries in string format much as
980
+ # you can using the HTTP endpoint. The interface is compatible with
981
+ # whatever Gremlin version your DB cluster is using (see the [Tinkerpop
982
+ # client section][1] to determine which Gremlin releases your engine
983
+ # version supports).
984
+ #
985
+ #
986
+ #
987
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-gremlin-client.html#best-practices-gremlin-java-latest
988
+ #
989
+ # @option params [String] :serializer
990
+ # If non-null, the query results are returned in a serialized response
991
+ # message in the format specified by this parameter. See the
992
+ # [GraphSON][1] section in the TinkerPop documentation for a list of the
993
+ # formats that are currently supported.
994
+ #
995
+ #
996
+ #
997
+ # [1]: https://tinkerpop.apache.org/docs/current/reference/#_graphson
998
+ #
999
+ # @return [Types::ExecuteGremlinQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1000
+ #
1001
+ # * {Types::ExecuteGremlinQueryOutput#request_id #request_id} => String
1002
+ # * {Types::ExecuteGremlinQueryOutput#status #status} => Types::GremlinQueryStatusAttributes
1003
+ # * {Types::ExecuteGremlinQueryOutput#result #result} => Hash,Array,String,Numeric,Boolean
1004
+ # * {Types::ExecuteGremlinQueryOutput#meta #meta} => Hash,Array,String,Numeric,Boolean
1005
+ #
1006
+ # @example Request syntax with placeholder values
1007
+ #
1008
+ # resp = client.execute_gremlin_query({
1009
+ # gremlin_query: "String", # required
1010
+ # serializer: "String",
1011
+ # })
1012
+ #
1013
+ # @example Response structure
1014
+ #
1015
+ # resp.request_id #=> String
1016
+ # resp.status.message #=> String
1017
+ # resp.status.code #=> Integer
1018
+ #
1019
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteGremlinQuery AWS API Documentation
1020
+ #
1021
+ # @overload execute_gremlin_query(params = {})
1022
+ # @param [Hash] params ({})
1023
# Builds the ExecuteGremlinQuery request from +params+ and dispatches it.
def execute_gremlin_query(params = {}, options = {})
  build_request(:execute_gremlin_query, params).send_request(options)
end
1027
+
1028
+ # Executes an openCypher `explain` request. See [The openCypher explain
1029
+ # feature][1] for more information.
1030
+ #
1031
+ #
1032
+ #
1033
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-opencypher-explain.html
1034
+ #
1035
+ # @option params [required, String] :open_cypher_query
1036
+ # The openCypher query string.
1037
+ #
1038
+ # @option params [String] :parameters
1039
+ # The openCypher query parameters.
1040
+ #
1041
+ # @option params [required, String] :explain_mode
1042
+ # The openCypher `explain` mode. Can be one of: `static`, `dynamic`, or
1043
+ # `details`.
1044
+ #
1045
+ # @return [Types::ExecuteOpenCypherExplainQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1046
+ #
1047
+ # * {Types::ExecuteOpenCypherExplainQueryOutput#results #results} => String
1048
+ #
1049
+ # @example Request syntax with placeholder values
1050
+ #
1051
+ # resp = client.execute_open_cypher_explain_query({
1052
+ # open_cypher_query: "String", # required
1053
+ # parameters: "String",
1054
+ # explain_mode: "static", # required, accepts static, dynamic, details
1055
+ # })
1056
+ #
1057
+ # @example Response structure
1058
+ #
1059
+ # resp.results #=> String
1060
+ #
1061
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteOpenCypherExplainQuery AWS API Documentation
1062
+ #
1063
+ # @overload execute_open_cypher_explain_query(params = {})
1064
+ # @param [Hash] params ({})
1065
# Builds the ExecuteOpenCypherExplainQuery request and dispatches it.
def execute_open_cypher_explain_query(params = {}, options = {})
  build_request(:execute_open_cypher_explain_query, params).send_request(options)
end
1069
+
1070
+ # Executes an openCypher query. See [Accessing the Neptune Graph with
1071
+ # openCypher][1] for more information.
1072
+ #
1073
+ # Neptune supports building graph applications using openCypher, which
1074
+ # is currently one of the most popular query languages among developers
1075
+ # working with graph databases. Developers, business analysts, and data
1076
+ # scientists like openCypher's declarative, SQL-inspired syntax because
1077
+ # it provides a familiar structure in which to query property graphs.
1078
+ #
1079
+ # The openCypher language was originally developed by Neo4j, then
1080
+ # open-sourced in 2015 and contributed to the [openCypher project][2]
1081
+ # under an Apache 2 open-source license.
1082
+ #
1083
+ #
1084
+ #
1085
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-opencypher.html
1086
+ # [2]: https://opencypher.org/
1087
+ #
1088
+ # @option params [required, String] :open_cypher_query
1089
+ # The openCypher query string to be executed.
1090
+ #
1091
+ # @option params [String] :parameters
1092
+ # The openCypher query parameters for query execution. See [Examples of
1093
+ # openCypher parameterized queries][1] for more information.
1094
+ #
1095
+ #
1096
+ #
1097
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/opencypher-parameterized-queries.html
1098
+ #
1099
+ # @return [Types::ExecuteOpenCypherQueryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1100
+ #
1101
+ # * {Types::ExecuteOpenCypherQueryOutput#results #results} => Hash,Array,String,Numeric,Boolean
1102
+ #
1103
+ # @example Request syntax with placeholder values
1104
+ #
1105
+ # resp = client.execute_open_cypher_query({
1106
+ # open_cypher_query: "String", # required
1107
+ # parameters: "String",
1108
+ # })
1109
+ #
1110
+ # @example Response structure
1111
+ #
1112
+ #
1113
+ #
1114
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ExecuteOpenCypherQuery AWS API Documentation
1115
+ #
1116
+ # @overload execute_open_cypher_query(params = {})
1117
+ # @param [Hash] params ({})
1118
# Builds the ExecuteOpenCypherQuery request from +params+ and dispatches it.
def execute_open_cypher_query(params = {}, options = {})
  build_request(:execute_open_cypher_query, params).send_request(options)
end
1122
+
1123
+ # Check the status of the graph database on the host.
1124
+ #
1125
+ # @return [Types::GetEngineStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1126
+ #
1127
+ # * {Types::GetEngineStatusOutput#status #status} => String
1128
+ # * {Types::GetEngineStatusOutput#start_time #start_time} => String
1129
+ # * {Types::GetEngineStatusOutput#db_engine_version #db_engine_version} => String
1130
+ # * {Types::GetEngineStatusOutput#role #role} => String
1131
+ # * {Types::GetEngineStatusOutput#dfe_query_engine #dfe_query_engine} => String
1132
+ # * {Types::GetEngineStatusOutput#gremlin #gremlin} => Types::QueryLanguageVersion
1133
+ # * {Types::GetEngineStatusOutput#sparql #sparql} => Types::QueryLanguageVersion
1134
+ # * {Types::GetEngineStatusOutput#opencypher #opencypher} => Types::QueryLanguageVersion
1135
+ # * {Types::GetEngineStatusOutput#lab_mode #lab_mode} => Hash&lt;String,String&gt;
1136
+ # * {Types::GetEngineStatusOutput#rolling_back_trx_count #rolling_back_trx_count} => Integer
1137
+ # * {Types::GetEngineStatusOutput#rolling_back_trx_earliest_start_time #rolling_back_trx_earliest_start_time} => String
1138
+ # * {Types::GetEngineStatusOutput#features #features} => Hash&lt;String,Hash,Array,String,Numeric,Boolean&gt;
1139
+ # * {Types::GetEngineStatusOutput#settings #settings} => Hash&lt;String,String&gt;
1140
+ #
1141
+ # @example Response structure
1142
+ #
1143
+ # resp.status #=> String
1144
+ # resp.start_time #=> String
1145
+ # resp.db_engine_version #=> String
1146
+ # resp.role #=> String
1147
+ # resp.dfe_query_engine #=> String
1148
+ # resp.gremlin.version #=> String
1149
+ # resp.sparql.version #=> String
1150
+ # resp.opencypher.version #=> String
1151
+ # resp.lab_mode #=> Hash
1152
+ # resp.lab_mode["String"] #=> String
1153
+ # resp.rolling_back_trx_count #=> Integer
1154
+ # resp.rolling_back_trx_earliest_start_time #=> String
1155
+ # resp.features #=> Hash
1156
+ # resp.settings #=> Hash
1157
+ # resp.settings["String"] #=> String
1158
+ #
1159
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetEngineStatus AWS API Documentation
1160
+ #
1161
+ # @overload get_engine_status(params = {})
1162
+ # @param [Hash] params ({})
1163
# Builds the GetEngineStatus request (no required params) and dispatches it.
def get_engine_status(params = {}, options = {})
  build_request(:get_engine_status, params).send_request(options)
end
1167
+
1168
+ # Gets the status of a specified Gremlin query.
1169
+ #
1170
+ # @option params [required, String] :query_id
1171
+ # The unique identifier that identifies the Gremlin query.
1172
+ #
1173
+ # @return [Types::GetGremlinQueryStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1174
+ #
1175
+ # * {Types::GetGremlinQueryStatusOutput#query_id #query_id} => String
1176
+ # * {Types::GetGremlinQueryStatusOutput#query_string #query_string} => String
1177
+ # * {Types::GetGremlinQueryStatusOutput#query_eval_stats #query_eval_stats} => Types::QueryEvalStats
1178
+ #
1179
+ # @example Request syntax with placeholder values
1180
+ #
1181
+ # resp = client.get_gremlin_query_status({
1182
+ # query_id: "String", # required
1183
+ # })
1184
+ #
1185
+ # @example Response structure
1186
+ #
1187
+ # resp.query_id #=> String
1188
+ # resp.query_string #=> String
1189
+ # resp.query_eval_stats.waited #=> Integer
1190
+ # resp.query_eval_stats.elapsed #=> Integer
1191
+ # resp.query_eval_stats.cancelled #=> Boolean
1192
+ #
1193
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetGremlinQueryStatus AWS API Documentation
1194
+ #
1195
+ # @overload get_gremlin_query_status(params = {})
1196
+ # @param [Hash] params ({})
1197
# Builds the GetGremlinQueryStatus request from +params+ and dispatches it.
def get_gremlin_query_status(params = {}, options = {})
  build_request(:get_gremlin_query_status, params).send_request(options)
end
1201
+
1202
+ # Gets status information about a specified load job. Neptune keeps
1203
+ # track of the most recent 1,024 bulk load jobs, and stores the last
1204
+ # 10,000 error details per job.
1205
+ #
1206
+ # See [Neptune Loader Get-Status API][1] for more information.
1207
+ #
1208
+ #
1209
+ #
1210
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/load-api-reference-status.html
1211
+ #
1212
+ # @option params [required, String] :load_id
1213
+ # The load ID of the load job to get the status of.
1214
+ #
1215
+ # @option params [Boolean] :details
1216
+ # Flag indicating whether or not to include details beyond the overall
1217
+ # status (`TRUE` or `FALSE`; the default is `FALSE`).
1218
+ #
1219
+ # @option params [Boolean] :errors
1220
+ # Flag indicating whether or not to include a list of errors encountered
1221
+ # (`TRUE` or `FALSE`; the default is `FALSE`).
1222
+ #
1223
+ # The list of errors is paged. The `page` and `errorsPerPage` parameters
1224
+ # allow you to page through all the errors.
1225
+ #
1226
+ # @option params [Integer] :page
1227
+ # The error page number (a positive integer; the default is `1`). Only
1228
+ # valid when the `errors` parameter is set to `TRUE`.
1229
+ #
1230
+ # @option params [Integer] :errors_per_page
1231
+ # The number of errors returned in each page (a positive integer; the
1232
+ # default is `10`). Only valid when the `errors` parameter set to
1233
+ # `TRUE`.
1234
+ #
1235
+ # @return [Types::GetLoaderJobStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1236
+ #
1237
+ # * {Types::GetLoaderJobStatusOutput#status #status} => String
1238
+ # * {Types::GetLoaderJobStatusOutput#payload #payload} => Hash,Array,String,Numeric,Boolean
1239
+ #
1240
+ # @example Request syntax with placeholder values
1241
+ #
1242
+ # resp = client.get_loader_job_status({
1243
+ # load_id: "String", # required
1244
+ # details: false,
1245
+ # errors: false,
1246
+ # page: 1,
1247
+ # errors_per_page: 1,
1248
+ # })
1249
+ #
1250
+ # @example Response structure
1251
+ #
1252
+ # resp.status #=> String
1253
+ #
1254
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetLoaderJobStatus AWS API Documentation
1255
+ #
1256
+ # @overload get_loader_job_status(params = {})
1257
+ # @param [Hash] params ({})
1258
# Builds the GetLoaderJobStatus request from +params+ and dispatches it.
def get_loader_job_status(params = {}, options = {})
  build_request(:get_loader_job_status, params).send_request(options)
end
1262
+
1263
+ # Retrieves information about a specified data processing job. See [The
1264
+ # `dataprocessing` command][1].
1265
+ #
1266
+ #
1267
+ #
1268
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html
1269
+ #
1270
+ # @option params [required, String] :id
1271
+ # The unique identifier of the data-processing job to be retrieved.
1272
+ #
1273
+ # @option params [String] :neptune_iam_role_arn
1274
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
1275
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
1276
+ # group or an error will occur.
1277
+ #
1278
+ # @return [Types::GetMLDataProcessingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1279
+ #
1280
+ # * {Types::GetMLDataProcessingJobOutput#status #status} => String
1281
+ # * {Types::GetMLDataProcessingJobOutput#id #id} => String
1282
+ # * {Types::GetMLDataProcessingJobOutput#processing_job #processing_job} => Types::MlResourceDefinition
1283
+ #
1284
+ # @example Request syntax with placeholder values
1285
+ #
1286
+ # resp = client.get_ml_data_processing_job({
1287
+ # id: "String", # required
1288
+ # neptune_iam_role_arn: "String",
1289
+ # })
1290
+ #
1291
+ # @example Response structure
1292
+ #
1293
+ # resp.status #=> String
1294
+ # resp.id #=> String
1295
+ # resp.processing_job.name #=> String
1296
+ # resp.processing_job.arn #=> String
1297
+ # resp.processing_job.status #=> String
1298
+ # resp.processing_job.output_location #=> String
1299
+ # resp.processing_job.failure_reason #=> String
1300
+ # resp.processing_job.cloudwatch_log_url #=> String
1301
+ #
1302
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetMLDataProcessingJob AWS API Documentation
1303
+ #
1304
+ # @overload get_ml_data_processing_job(params = {})
1305
+ # @param [Hash] params ({})
1306
# Builds the GetMLDataProcessingJob request and dispatches it.
def get_ml_data_processing_job(params = {}, options = {})
  build_request(:get_ml_data_processing_job, params).send_request(options)
end
1310
+
1311
+ # Retrieves details about an inference endpoint. See [Managing inference
1312
+ # endpoints using the endpoints command][1].
1313
+ #
1314
+ #
1315
+ #
1316
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-endpoints.html
1317
+ #
1318
+ # @option params [required, String] :id
1319
+ # The unique identifier of the inference endpoint.
1320
+ #
1321
+ # @option params [String] :neptune_iam_role_arn
1322
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
1323
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
1324
+ # group or an error will occur.
1325
+ #
1326
+ # @return [Types::GetMLEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1327
+ #
1328
+ # * {Types::GetMLEndpointOutput#status #status} => String
1329
+ # * {Types::GetMLEndpointOutput#id #id} => String
1330
+ # * {Types::GetMLEndpointOutput#endpoint #endpoint} => Types::MlResourceDefinition
1331
+ # * {Types::GetMLEndpointOutput#endpoint_config #endpoint_config} => Types::MlConfigDefinition
1332
+ #
1333
+ # @example Request syntax with placeholder values
1334
+ #
1335
+ # resp = client.get_ml_endpoint({
1336
+ # id: "String", # required
1337
+ # neptune_iam_role_arn: "String",
1338
+ # })
1339
+ #
1340
+ # @example Response structure
1341
+ #
1342
+ # resp.status #=> String
1343
+ # resp.id #=> String
1344
+ # resp.endpoint.name #=> String
1345
+ # resp.endpoint.arn #=> String
1346
+ # resp.endpoint.status #=> String
1347
+ # resp.endpoint.output_location #=> String
1348
+ # resp.endpoint.failure_reason #=> String
1349
+ # resp.endpoint.cloudwatch_log_url #=> String
1350
+ # resp.endpoint_config.name #=> String
1351
+ # resp.endpoint_config.arn #=> String
1352
+ #
1353
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetMLEndpoint AWS API Documentation
1354
+ #
1355
+ # @overload get_ml_endpoint(params = {})
1356
+ # @param [Hash] params ({})
1357
# Builds the GetMLEndpoint request from +params+ and dispatches it.
def get_ml_endpoint(params = {}, options = {})
  build_request(:get_ml_endpoint, params).send_request(options)
end
1361
+
1362
+ # Retrieves information about a Neptune ML model training job. See
1363
+ # [Model training using the `modeltraining` command][1].
1364
+ #
1365
+ #
1366
+ #
1367
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-modeltraining.html
1368
+ #
1369
+ # @option params [required, String] :id
1370
+ # The unique identifier of the model-training job to retrieve.
1371
+ #
1372
+ # @option params [String] :neptune_iam_role_arn
1373
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
1374
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
1375
+ # group or an error will occur.
1376
+ #
1377
+ # @return [Types::GetMLModelTrainingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1378
+ #
1379
+ # * {Types::GetMLModelTrainingJobOutput#status #status} => String
1380
+ # * {Types::GetMLModelTrainingJobOutput#id #id} => String
1381
+ # * {Types::GetMLModelTrainingJobOutput#processing_job #processing_job} => Types::MlResourceDefinition
1382
+ # * {Types::GetMLModelTrainingJobOutput#hpo_job #hpo_job} => Types::MlResourceDefinition
1383
+ # * {Types::GetMLModelTrainingJobOutput#model_transform_job #model_transform_job} => Types::MlResourceDefinition
1384
+ # * {Types::GetMLModelTrainingJobOutput#ml_models #ml_models} => Array&lt;Types::MlConfigDefinition&gt;
1385
+ #
1386
+ # @example Request syntax with placeholder values
1387
+ #
1388
+ # resp = client.get_ml_model_training_job({
1389
+ # id: "String", # required
1390
+ # neptune_iam_role_arn: "String",
1391
+ # })
1392
+ #
1393
+ # @example Response structure
1394
+ #
1395
+ # resp.status #=> String
1396
+ # resp.id #=> String
1397
+ # resp.processing_job.name #=> String
1398
+ # resp.processing_job.arn #=> String
1399
+ # resp.processing_job.status #=> String
1400
+ # resp.processing_job.output_location #=> String
1401
+ # resp.processing_job.failure_reason #=> String
1402
+ # resp.processing_job.cloudwatch_log_url #=> String
1403
+ # resp.hpo_job.name #=> String
1404
+ # resp.hpo_job.arn #=> String
1405
+ # resp.hpo_job.status #=> String
1406
+ # resp.hpo_job.output_location #=> String
1407
+ # resp.hpo_job.failure_reason #=> String
1408
+ # resp.hpo_job.cloudwatch_log_url #=> String
1409
+ # resp.model_transform_job.name #=> String
1410
+ # resp.model_transform_job.arn #=> String
1411
+ # resp.model_transform_job.status #=> String
1412
+ # resp.model_transform_job.output_location #=> String
1413
+ # resp.model_transform_job.failure_reason #=> String
1414
+ # resp.model_transform_job.cloudwatch_log_url #=> String
1415
+ # resp.ml_models #=> Array
1416
+ # resp.ml_models[0].name #=> String
1417
+ # resp.ml_models[0].arn #=> String
1418
+ #
1419
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetMLModelTrainingJob AWS API Documentation
1420
+ #
1421
+ # @overload get_ml_model_training_job(params = {})
1422
+ # @param [Hash] params ({})
1423
# Builds the GetMLModelTrainingJob request and dispatches it.
def get_ml_model_training_job(params = {}, options = {})
  build_request(:get_ml_model_training_job, params).send_request(options)
end
1427
+
1428
+ # Gets information about a specified model transform job. See [Use a
1429
+ # trained model to generate new model artifacts][1].
1430
+ #
1431
+ #
1432
+ #
1433
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-model-transform.html
1434
+ #
1435
+ # @option params [required, String] :id
1436
+ # The unique identifier of the model-transform job to be retrieved.
1437
+ #
1438
+ # @option params [String] :neptune_iam_role_arn
1439
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
1440
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
1441
+ # group or an error will occur.
1442
+ #
1443
+ # @return [Types::GetMLModelTransformJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1444
+ #
1445
+ # * {Types::GetMLModelTransformJobOutput#status #status} => String
1446
+ # * {Types::GetMLModelTransformJobOutput#id #id} => String
1447
+ # * {Types::GetMLModelTransformJobOutput#base_processing_job #base_processing_job} => Types::MlResourceDefinition
1448
+ # * {Types::GetMLModelTransformJobOutput#remote_model_transform_job #remote_model_transform_job} => Types::MlResourceDefinition
1449
+ # * {Types::GetMLModelTransformJobOutput#models #models} => Array&lt;Types::MlConfigDefinition&gt;
1450
+ #
1451
+ # @example Request syntax with placeholder values
1452
+ #
1453
+ # resp = client.get_ml_model_transform_job({
1454
+ # id: "String", # required
1455
+ # neptune_iam_role_arn: "String",
1456
+ # })
1457
+ #
1458
+ # @example Response structure
1459
+ #
1460
+ # resp.status #=> String
1461
+ # resp.id #=> String
1462
+ # resp.base_processing_job.name #=> String
1463
+ # resp.base_processing_job.arn #=> String
1464
+ # resp.base_processing_job.status #=> String
1465
+ # resp.base_processing_job.output_location #=> String
1466
+ # resp.base_processing_job.failure_reason #=> String
1467
+ # resp.base_processing_job.cloudwatch_log_url #=> String
1468
+ # resp.remote_model_transform_job.name #=> String
1469
+ # resp.remote_model_transform_job.arn #=> String
1470
+ # resp.remote_model_transform_job.status #=> String
1471
+ # resp.remote_model_transform_job.output_location #=> String
1472
+ # resp.remote_model_transform_job.failure_reason #=> String
1473
+ # resp.remote_model_transform_job.cloudwatch_log_url #=> String
1474
+ # resp.models #=> Array
1475
+ # resp.models[0].name #=> String
1476
+ # resp.models[0].arn #=> String
1477
+ #
1478
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetMLModelTransformJob AWS API Documentation
1479
+ #
1480
+ # @overload get_ml_model_transform_job(params = {})
1481
+ # @param [Hash] params ({})
1482
def get_ml_model_transform_job(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_ml_model_transform_job, params).send_request(options)
end
1486
+
1487
+ # Retrieves the status of a specified openCypher query.
1488
+ #
1489
+ # @option params [required, String] :query_id
1490
+ # The unique ID of the openCypher query for which to retrieve the query
1491
+ # status.
1492
+ #
1493
+ # @return [Types::GetOpenCypherQueryStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1494
+ #
1495
+ # * {Types::GetOpenCypherQueryStatusOutput#query_id #query_id} => String
1496
+ # * {Types::GetOpenCypherQueryStatusOutput#query_string #query_string} => String
1497
+ # * {Types::GetOpenCypherQueryStatusOutput#query_eval_stats #query_eval_stats} => Types::QueryEvalStats
1498
+ #
1499
+ # @example Request syntax with placeholder values
1500
+ #
1501
+ # resp = client.get_open_cypher_query_status({
1502
+ # query_id: "String", # required
1503
+ # })
1504
+ #
1505
+ # @example Response structure
1506
+ #
1507
+ # resp.query_id #=> String
1508
+ # resp.query_string #=> String
1509
+ # resp.query_eval_stats.waited #=> Integer
1510
+ # resp.query_eval_stats.elapsed #=> Integer
1511
+ # resp.query_eval_stats.cancelled #=> Boolean
1512
+ #
1513
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetOpenCypherQueryStatus AWS API Documentation
1514
+ #
1515
+ # @overload get_open_cypher_query_status(params = {})
1516
+ # @param [Hash] params ({})
1517
def get_open_cypher_query_status(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_open_cypher_query_status, params).send_request(options)
end
1521
+
1522
+ # Gets property graph statistics (Gremlin and openCypher).
1523
+ #
1524
+ # @return [Types::GetPropertygraphStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1525
+ #
1526
+ # * {Types::GetPropertygraphStatisticsOutput#status #status} => String
1527
+ # * {Types::GetPropertygraphStatisticsOutput#payload #payload} => Types::Statistics
1528
+ #
1529
+ # @example Response structure
1530
+ #
1531
+ # resp.status #=> String
1532
+ # resp.payload.auto_compute #=> Boolean
1533
+ # resp.payload.active #=> Boolean
1534
+ # resp.payload.statistics_id #=> String
1535
+ # resp.payload.date #=> Time
1536
+ # resp.payload.note #=> String
1537
+ # resp.payload.signature_info.signature_count #=> Integer
1538
+ # resp.payload.signature_info.instance_count #=> Integer
1539
+ # resp.payload.signature_info.predicate_count #=> Integer
1540
+ #
1541
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetPropertygraphStatistics AWS API Documentation
1542
+ #
1543
+ # @overload get_propertygraph_statistics(params = {})
1544
+ # @param [Hash] params ({})
1545
def get_propertygraph_statistics(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_propertygraph_statistics, params).send_request(options)
end
1549
+
1550
+ # Gets a stream for a property graph.
1551
+ #
1552
+ # With the Neptune Streams feature, you can generate a complete sequence
1553
+ # of change-log entries that record every change made to your graph data
1554
+ # as it happens. `GetPropertygraphStream` lets you collect these
1555
+ # change-log entries for a property graph.
1556
+ #
1557
+ # The Neptune streams feature needs to be enabled on your Neptune
1558
+ # DB cluster. To enable streams, set the [neptune\_streams][1] DB cluster
1559
+ # parameter to `1`.
1560
+ #
1561
+ # See [Capturing graph changes in real time using Neptune streams][2].
1562
+ #
1563
+ #
1564
+ #
1565
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/parameters.html#parameters-db-cluster-parameters-neptune_streams
1566
+ # [2]: https://docs.aws.amazon.com/neptune/latest/userguide/streams.html
1567
+ #
1568
+ # @option params [Integer] :limit
1569
+ # Specifies the maximum number of records to return. There is also a
1570
+ # size limit of 10 MB on the response that can't be modified and that
1571
+ # takes precedence over the number of records specified in the `limit`
1572
+ # parameter. The response does include a threshold-breaching record if
1573
+ # the 10 MB limit was reached.
1574
+ #
1575
+ # The range for `limit` is 1 to 100,000, with a default of 10.
1576
+ #
1577
+ # @option params [String] :iterator_type
1578
+ # Can be one of:
1579
+ #
1580
+ # * `AT_SEQUENCE_NUMBER`   –   Indicates that reading should start from
1581
+ # the event sequence number specified jointly by the `commitNum` and
1582
+ # `opNum` parameters.
1583
+ #
1584
+ # * `AFTER_SEQUENCE_NUMBER`   –   Indicates that reading should start
1585
+ # right after the event sequence number specified jointly by the
1586
+ # `commitNum` and `opNum` parameters.
1587
+ #
1588
+ # * `TRIM_HORIZON`   –   Indicates that reading should start at the last
1589
+ # untrimmed record in the system, which is the oldest unexpired (not
1590
+ # yet deleted) record in the change-log stream.
1591
+ #
1592
+ # * `LATEST`   –   Indicates that reading should start at the most
1593
+ # recent record in the system, which is the latest unexpired (not yet
1594
+ # deleted) record in the change-log stream.
1595
+ #
1596
+ # @option params [Integer] :commit_num
1597
+ # The commit number of the starting record to read from the change-log
1598
+ # stream. This parameter is required when `iteratorType`
1599
+ # is `AT_SEQUENCE_NUMBER` or `AFTER_SEQUENCE_NUMBER`, and ignored when
1600
+ # `iteratorType` is `TRIM_HORIZON` or `LATEST`.
1601
+ #
1602
+ # @option params [Integer] :op_num
1603
+ # The operation sequence number within the specified commit to start
1604
+ # reading from in the change-log stream data. The default is `1`.
1605
+ #
1606
+ # @option params [String] :encoding
1607
+ # If set to `"gzip"`, Neptune compresses the response using gzip encoding.
1608
+ #
1609
+ # @return [Types::GetPropertygraphStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1610
+ #
1611
+ # * {Types::GetPropertygraphStreamOutput#last_event_id #last_event_id} => Hash&lt;String,String&gt;
1612
+ # * {Types::GetPropertygraphStreamOutput#last_trx_timestamp_in_millis #last_trx_timestamp_in_millis} => Integer
1613
+ # * {Types::GetPropertygraphStreamOutput#format #format} => String
1614
+ # * {Types::GetPropertygraphStreamOutput#records #records} => Array&lt;Types::PropertygraphRecord&gt;
1615
+ # * {Types::GetPropertygraphStreamOutput#total_records #total_records} => Integer
1616
+ #
1617
+ # @example Request syntax with placeholder values
1618
+ #
1619
+ # resp = client.get_propertygraph_stream({
1620
+ # limit: 1,
1621
+ # iterator_type: "AT_SEQUENCE_NUMBER", # accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST
1622
+ # commit_num: 1,
1623
+ # op_num: 1,
1624
+ # encoding: "gzip", # accepts gzip
1625
+ # })
1626
+ #
1627
+ # @example Response structure
1628
+ #
1629
+ # resp.last_event_id #=> Hash
1630
+ # resp.last_event_id["String"] #=> String
1631
+ # resp.last_trx_timestamp_in_millis #=> Integer
1632
+ # resp.format #=> String
1633
+ # resp.records #=> Array
1634
+ # resp.records[0].commit_timestamp_in_millis #=> Integer
1635
+ # resp.records[0].event_id #=> Hash
1636
+ # resp.records[0].event_id["String"] #=> String
1637
+ # resp.records[0].data.id #=> String
1638
+ # resp.records[0].data.type #=> String
1639
+ # resp.records[0].data.key #=> String
1640
+ # resp.records[0].data.from #=> String
1641
+ # resp.records[0].data.to #=> String
1642
+ # resp.records[0].op #=> String
1643
+ # resp.records[0].is_last_op #=> Boolean
1644
+ # resp.total_records #=> Integer
1645
+ #
1646
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetPropertygraphStream AWS API Documentation
1647
+ #
1648
+ # @overload get_propertygraph_stream(params = {})
1649
+ # @param [Hash] params ({})
1650
def get_propertygraph_stream(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_propertygraph_stream, params).send_request(options)
end
1654
+
1655
+ # Gets a graph summary for a property graph.
1656
+ #
1657
+ # @option params [String] :mode
1658
+ # Mode can take one of two values: `BASIC` (the default), and
1659
+ # `DETAILED`.
1660
+ #
1661
+ # @return [Types::GetPropertygraphSummaryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1662
+ #
1663
+ # * {Types::GetPropertygraphSummaryOutput#status_code #status_code} => Integer
1664
+ # * {Types::GetPropertygraphSummaryOutput#payload #payload} => Types::PropertygraphSummaryValueMap
1665
+ #
1666
+ # @example Request syntax with placeholder values
1667
+ #
1668
+ # resp = client.get_propertygraph_summary({
1669
+ # mode: "basic", # accepts basic, detailed
1670
+ # })
1671
+ #
1672
+ # @example Response structure
1673
+ #
1674
+ # resp.status_code #=> Integer
1675
+ # resp.payload.version #=> String
1676
+ # resp.payload.last_statistics_computation_time #=> Time
1677
+ # resp.payload.graph_summary.num_nodes #=> Integer
1678
+ # resp.payload.graph_summary.num_edges #=> Integer
1679
+ # resp.payload.graph_summary.num_node_labels #=> Integer
1680
+ # resp.payload.graph_summary.num_edge_labels #=> Integer
1681
+ # resp.payload.graph_summary.node_labels #=> Array
1682
+ # resp.payload.graph_summary.node_labels[0] #=> String
1683
+ # resp.payload.graph_summary.edge_labels #=> Array
1684
+ # resp.payload.graph_summary.edge_labels[0] #=> String
1685
+ # resp.payload.graph_summary.num_node_properties #=> Integer
1686
+ # resp.payload.graph_summary.num_edge_properties #=> Integer
1687
+ # resp.payload.graph_summary.node_properties #=> Array
1688
+ # resp.payload.graph_summary.node_properties[0] #=> Hash
1689
+ # resp.payload.graph_summary.node_properties[0]["String"] #=> Integer
1690
+ # resp.payload.graph_summary.edge_properties #=> Array
1691
+ # resp.payload.graph_summary.edge_properties[0] #=> Hash
1692
+ # resp.payload.graph_summary.edge_properties[0]["String"] #=> Integer
1693
+ # resp.payload.graph_summary.total_node_property_values #=> Integer
1694
+ # resp.payload.graph_summary.total_edge_property_values #=> Integer
1695
+ # resp.payload.graph_summary.node_structures #=> Array
1696
+ # resp.payload.graph_summary.node_structures[0].count #=> Integer
1697
+ # resp.payload.graph_summary.node_structures[0].node_properties #=> Array
1698
+ # resp.payload.graph_summary.node_structures[0].node_properties[0] #=> String
1699
+ # resp.payload.graph_summary.node_structures[0].distinct_outgoing_edge_labels #=> Array
1700
+ # resp.payload.graph_summary.node_structures[0].distinct_outgoing_edge_labels[0] #=> String
1701
+ # resp.payload.graph_summary.edge_structures #=> Array
1702
+ # resp.payload.graph_summary.edge_structures[0].count #=> Integer
1703
+ # resp.payload.graph_summary.edge_structures[0].edge_properties #=> Array
1704
+ # resp.payload.graph_summary.edge_structures[0].edge_properties[0] #=> String
1705
+ #
1706
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetPropertygraphSummary AWS API Documentation
1707
+ #
1708
+ # @overload get_propertygraph_summary(params = {})
1709
+ # @param [Hash] params ({})
1710
def get_propertygraph_summary(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_propertygraph_summary, params).send_request(options)
end
1714
+
1715
+ # Gets a graph summary for an RDF graph.
1716
+ #
1717
+ # @option params [String] :mode
1718
+ # Mode can take one of two values: `BASIC` (the default), and
1719
+ # `DETAILED`.
1720
+ #
1721
+ # @return [Types::GetRDFGraphSummaryOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1722
+ #
1723
+ # * {Types::GetRDFGraphSummaryOutput#status_code #status_code} => Integer
1724
+ # * {Types::GetRDFGraphSummaryOutput#payload #payload} => Types::RDFGraphSummaryValueMap
1725
+ #
1726
+ # @example Request syntax with placeholder values
1727
+ #
1728
+ # resp = client.get_rdf_graph_summary({
1729
+ # mode: "basic", # accepts basic, detailed
1730
+ # })
1731
+ #
1732
+ # @example Response structure
1733
+ #
1734
+ # resp.status_code #=> Integer
1735
+ # resp.payload.version #=> String
1736
+ # resp.payload.last_statistics_computation_time #=> Time
1737
+ # resp.payload.graph_summary.num_distinct_subjects #=> Integer
1738
+ # resp.payload.graph_summary.num_distinct_predicates #=> Integer
1739
+ # resp.payload.graph_summary.num_quads #=> Integer
1740
+ # resp.payload.graph_summary.num_classes #=> Integer
1741
+ # resp.payload.graph_summary.classes #=> Array
1742
+ # resp.payload.graph_summary.classes[0] #=> String
1743
+ # resp.payload.graph_summary.predicates #=> Array
1744
+ # resp.payload.graph_summary.predicates[0] #=> Hash
1745
+ # resp.payload.graph_summary.predicates[0]["String"] #=> Integer
1746
+ # resp.payload.graph_summary.subject_structures #=> Array
1747
+ # resp.payload.graph_summary.subject_structures[0].count #=> Integer
1748
+ # resp.payload.graph_summary.subject_structures[0].predicates #=> Array
1749
+ # resp.payload.graph_summary.subject_structures[0].predicates[0] #=> String
1750
+ #
1751
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetRDFGraphSummary AWS API Documentation
1752
+ #
1753
+ # @overload get_rdf_graph_summary(params = {})
1754
+ # @param [Hash] params ({})
1755
def get_rdf_graph_summary(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_rdf_graph_summary, params).send_request(options)
end
1759
+
1760
+ # Gets RDF statistics (SPARQL).
1761
+ #
1762
+ # @return [Types::GetSparqlStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1763
+ #
1764
+ # * {Types::GetSparqlStatisticsOutput#status #status} => String
1765
+ # * {Types::GetSparqlStatisticsOutput#payload #payload} => Types::Statistics
1766
+ #
1767
+ # @example Response structure
1768
+ #
1769
+ # resp.status #=> String
1770
+ # resp.payload.auto_compute #=> Boolean
1771
+ # resp.payload.active #=> Boolean
1772
+ # resp.payload.statistics_id #=> String
1773
+ # resp.payload.date #=> Time
1774
+ # resp.payload.note #=> String
1775
+ # resp.payload.signature_info.signature_count #=> Integer
1776
+ # resp.payload.signature_info.instance_count #=> Integer
1777
+ # resp.payload.signature_info.predicate_count #=> Integer
1778
+ #
1779
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetSparqlStatistics AWS API Documentation
1780
+ #
1781
+ # @overload get_sparql_statistics(params = {})
1782
+ # @param [Hash] params ({})
1783
def get_sparql_statistics(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_sparql_statistics, params).send_request(options)
end
1787
+
1788
+ # Gets a stream for an RDF graph.
1789
+ #
1790
+ # With the Neptune Streams feature, you can generate a complete sequence
1791
+ # of change-log entries that record every change made to your graph data
1792
+ # as it happens. `GetSparqlStream` lets you collect these change-log
1793
+ # entries for an RDF graph.
1794
+ #
1795
+ # The Neptune streams feature needs to be enabled on your Neptune
1797
+ # DB cluster. To enable streams, set the [neptune\_streams][1] DB cluster
1797
+ # parameter to `1`.
1798
+ #
1799
+ # See [Capturing graph changes in real time using Neptune streams][2].
1800
+ #
1801
+ #
1802
+ #
1803
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/parameters.html#parameters-db-cluster-parameters-neptune_streams
1804
+ # [2]: https://docs.aws.amazon.com/neptune/latest/userguide/streams.html
1805
+ #
1806
+ # @option params [Integer] :limit
1807
+ # Specifies the maximum number of records to return. There is also a
1808
+ # size limit of 10 MB on the response that can't be modified and that
1809
+ # takes precedence over the number of records specified in the `limit`
1810
+ # parameter. The response does include a threshold-breaching record if
1811
+ # the 10 MB limit was reached.
1812
+ #
1813
+ # The range for `limit` is 1 to 100,000, with a default of 10.
1814
+ #
1815
+ # @option params [String] :iterator_type
1816
+ # Can be one of:
1817
+ #
1818
+ # * `AT_SEQUENCE_NUMBER`   –   Indicates that reading should start from
1819
+ # the event sequence number specified jointly by the `commitNum` and
1820
+ # `opNum` parameters.
1821
+ #
1822
+ # * `AFTER_SEQUENCE_NUMBER`   –   Indicates that reading should start
1823
+ # right after the event sequence number specified jointly by the
1824
+ # `commitNum` and `opNum` parameters.
1825
+ #
1826
+ # * `TRIM_HORIZON`   –   Indicates that reading should start at the last
1827
+ # untrimmed record in the system, which is the oldest unexpired (not
1828
+ # yet deleted) record in the change-log stream.
1829
+ #
1830
+ # * `LATEST`   –   Indicates that reading should start at the most
1831
+ # recent record in the system, which is the latest unexpired (not yet
1832
+ # deleted) record in the change-log stream.
1833
+ #
1834
+ # @option params [Integer] :commit_num
1835
+ # The commit number of the starting record to read from the change-log
1836
+ # stream. This parameter is required when `iteratorType`
1838
+ # is `AT_SEQUENCE_NUMBER` or `AFTER_SEQUENCE_NUMBER`, and ignored when
1838
+ # `iteratorType` is `TRIM_HORIZON` or `LATEST`.
1839
+ #
1840
+ # @option params [Integer] :op_num
1841
+ # The operation sequence number within the specified commit to start
1842
+ # reading from in the change-log stream data. The default is `1`.
1843
+ #
1844
+ # @option params [String] :encoding
1845
+ # If set to `"gzip"`, Neptune compresses the response using gzip encoding.
1846
+ #
1847
+ # @return [Types::GetSparqlStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1848
+ #
1849
+ # * {Types::GetSparqlStreamOutput#last_event_id #last_event_id} => Hash&lt;String,String&gt;
1850
+ # * {Types::GetSparqlStreamOutput#last_trx_timestamp_in_millis #last_trx_timestamp_in_millis} => Integer
1851
+ # * {Types::GetSparqlStreamOutput#format #format} => String
1852
+ # * {Types::GetSparqlStreamOutput#records #records} => Array&lt;Types::SparqlRecord&gt;
1853
+ # * {Types::GetSparqlStreamOutput#total_records #total_records} => Integer
1854
+ #
1855
+ # @example Request syntax with placeholder values
1856
+ #
1857
+ # resp = client.get_sparql_stream({
1858
+ # limit: 1,
1859
+ # iterator_type: "AT_SEQUENCE_NUMBER", # accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST
1860
+ # commit_num: 1,
1861
+ # op_num: 1,
1862
+ # encoding: "gzip", # accepts gzip
1863
+ # })
1864
+ #
1865
+ # @example Response structure
1866
+ #
1867
+ # resp.last_event_id #=> Hash
1868
+ # resp.last_event_id["String"] #=> String
1869
+ # resp.last_trx_timestamp_in_millis #=> Integer
1870
+ # resp.format #=> String
1871
+ # resp.records #=> Array
1872
+ # resp.records[0].commit_timestamp_in_millis #=> Integer
1873
+ # resp.records[0].event_id #=> Hash
1874
+ # resp.records[0].event_id["String"] #=> String
1875
+ # resp.records[0].data.stmt #=> String
1876
+ # resp.records[0].op #=> String
1877
+ # resp.records[0].is_last_op #=> Boolean
1878
+ # resp.total_records #=> Integer
1879
+ #
1880
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/GetSparqlStream AWS API Documentation
1881
+ #
1882
+ # @overload get_sparql_stream(params = {})
1883
+ # @param [Hash] params ({})
1884
def get_sparql_stream(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:get_sparql_stream, params).send_request(options)
end
1888
+
1889
+ # Lists active Gremlin queries. See [Gremlin query status API][1] for
1890
+ # details about the output.
1891
+ #
1892
+ #
1893
+ #
1894
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-api-status.html
1895
+ #
1896
+ # @option params [Boolean] :include_waiting
1897
+ # If set to `TRUE`, the list returned includes waiting queries. The
1898
+ # default is `FALSE`.
1899
+ #
1900
+ # @return [Types::ListGremlinQueriesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1901
+ #
1902
+ # * {Types::ListGremlinQueriesOutput#accepted_query_count #accepted_query_count} => Integer
1903
+ # * {Types::ListGremlinQueriesOutput#running_query_count #running_query_count} => Integer
1904
+ # * {Types::ListGremlinQueriesOutput#queries #queries} => Array&lt;Types::GremlinQueryStatus&gt;
1905
+ #
1906
+ # @example Request syntax with placeholder values
1907
+ #
1908
+ # resp = client.list_gremlin_queries({
1909
+ # include_waiting: false,
1910
+ # })
1911
+ #
1912
+ # @example Response structure
1913
+ #
1914
+ # resp.accepted_query_count #=> Integer
1915
+ # resp.running_query_count #=> Integer
1916
+ # resp.queries #=> Array
1917
+ # resp.queries[0].query_id #=> String
1918
+ # resp.queries[0].query_string #=> String
1919
+ # resp.queries[0].query_eval_stats.waited #=> Integer
1920
+ # resp.queries[0].query_eval_stats.elapsed #=> Integer
1921
+ # resp.queries[0].query_eval_stats.cancelled #=> Boolean
1922
+ #
1923
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListGremlinQueries AWS API Documentation
1924
+ #
1925
+ # @overload list_gremlin_queries(params = {})
1926
+ # @param [Hash] params ({})
1927
def list_gremlin_queries(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_gremlin_queries, params).send_request(options)
end
1931
+
1932
+ # Retrieves a list of the `loadIds` for all active loader jobs.
1933
+ #
1934
+ # @option params [Integer] :limit
1935
+ # The number of load IDs to list. Must be a positive integer greater
1936
+ # than zero and not more than `100` (which is the default).
1937
+ #
1938
+ # @option params [Boolean] :include_queued_loads
1939
+ # An optional parameter that can be used to exclude the load IDs of
1940
+ # queued load requests when requesting a list of load IDs by setting the
1941
+ # parameter to `FALSE`. The default value is `TRUE`.
1942
+ #
1943
+ # @return [Types::ListLoaderJobsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1944
+ #
1945
+ # * {Types::ListLoaderJobsOutput#status #status} => String
1946
+ # * {Types::ListLoaderJobsOutput#payload #payload} => Types::LoaderIdResult
1947
+ #
1948
+ # @example Request syntax with placeholder values
1949
+ #
1950
+ # resp = client.list_loader_jobs({
1951
+ # limit: 1,
1952
+ # include_queued_loads: false,
1953
+ # })
1954
+ #
1955
+ # @example Response structure
1956
+ #
1957
+ # resp.status #=> String
1958
+ # resp.payload.load_ids #=> Array
1959
+ # resp.payload.load_ids[0] #=> String
1960
+ #
1961
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListLoaderJobs AWS API Documentation
1962
+ #
1963
+ # @overload list_loader_jobs(params = {})
1964
+ # @param [Hash] params ({})
1965
def list_loader_jobs(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_loader_jobs, params).send_request(options)
end
1969
+
1970
+ # Returns a list of Neptune ML data processing jobs. See [Listing active
1971
+ # data-processing jobs using the Neptune ML dataprocessing command][1].
1972
+ #
1973
+ #
1974
+ #
1975
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html#machine-learning-api-dataprocessing-list-jobs
1976
+ #
1977
+ # @option params [Integer] :max_items
1978
+ # The maximum number of items to return (from 1 to 1024; the default is
1979
+ # 10).
1980
+ #
1981
+ # @option params [String] :neptune_iam_role_arn
1982
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
1983
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
1984
+ # group or an error will occur.
1985
+ #
1986
+ # @return [Types::ListMLDataProcessingJobsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1987
+ #
1988
+ # * {Types::ListMLDataProcessingJobsOutput#ids #ids} => Array&lt;String&gt;
1989
+ #
1990
+ # @example Request syntax with placeholder values
1991
+ #
1992
+ # resp = client.list_ml_data_processing_jobs({
1993
+ # max_items: 1,
1994
+ # neptune_iam_role_arn: "String",
1995
+ # })
1996
+ #
1997
+ # @example Response structure
1998
+ #
1999
+ # resp.ids #=> Array
2000
+ # resp.ids[0] #=> String
2001
+ #
2002
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListMLDataProcessingJobs AWS API Documentation
2003
+ #
2004
+ # @overload list_ml_data_processing_jobs(params = {})
2005
+ # @param [Hash] params ({})
2006
def list_ml_data_processing_jobs(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_ml_data_processing_jobs, params).send_request(options)
end
2010
+
2011
+ # Lists existing inference endpoints. See [Managing inference endpoints
2012
+ # using the endpoints command][1].
2013
+ #
2014
+ #
2015
+ #
2016
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-endpoints.html
2017
+ #
2018
+ # @option params [Integer] :max_items
2019
+ # The maximum number of items to return (from 1 to 1024; the default is
2020
+ # 10).
2021
+ #
2022
+ # @option params [String] :neptune_iam_role_arn
2023
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
2024
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
2025
+ # group or an error will occur.
2026
+ #
2027
+ # @return [Types::ListMLEndpointsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2028
+ #
2029
+ # * {Types::ListMLEndpointsOutput#ids #ids} => Array&lt;String&gt;
2030
+ #
2031
+ # @example Request syntax with placeholder values
2032
+ #
2033
+ # resp = client.list_ml_endpoints({
2034
+ # max_items: 1,
2035
+ # neptune_iam_role_arn: "String",
2036
+ # })
2037
+ #
2038
+ # @example Response structure
2039
+ #
2040
+ # resp.ids #=> Array
2041
+ # resp.ids[0] #=> String
2042
+ #
2043
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListMLEndpoints AWS API Documentation
2044
+ #
2045
+ # @overload list_ml_endpoints(params = {})
2046
+ # @param [Hash] params ({})
2047
def list_ml_endpoints(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_ml_endpoints, params).send_request(options)
end
2051
+
2052
+ # Lists Neptune ML model-training jobs. See [Model training using the
2053
+ # `modeltraining` command][1].
2054
+ #
2055
+ #
2056
+ #
2057
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-modeltraining.html
2058
+ #
2059
+ # @option params [Integer] :max_items
2060
+ # The maximum number of items to return (from 1 to 1024; the default is
2061
+ # 10).
2062
+ #
2063
+ # @option params [String] :neptune_iam_role_arn
2064
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
2065
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
2066
+ # group or an error will occur.
2067
+ #
2068
+ # @return [Types::ListMLModelTrainingJobsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2069
+ #
2070
+ # * {Types::ListMLModelTrainingJobsOutput#ids #ids} => Array&lt;String&gt;
2071
+ #
2072
+ # @example Request syntax with placeholder values
2073
+ #
2074
+ # resp = client.list_ml_model_training_jobs({
2075
+ # max_items: 1,
2076
+ # neptune_iam_role_arn: "String",
2077
+ # })
2078
+ #
2079
+ # @example Response structure
2080
+ #
2081
+ # resp.ids #=> Array
2082
+ # resp.ids[0] #=> String
2083
+ #
2084
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListMLModelTrainingJobs AWS API Documentation
2085
+ #
2086
+ # @overload list_ml_model_training_jobs(params = {})
2087
+ # @param [Hash] params ({})
2088
def list_ml_model_training_jobs(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_ml_model_training_jobs, params).send_request(options)
end
2092
+
2093
+ # Returns a list of model transform job IDs. See [Use a trained model to
2094
+ # generate new model artifacts][1].
2095
+ #
2096
+ #
2097
+ #
2098
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-model-transform.html
2099
+ #
2100
+ # @option params [Integer] :max_items
2101
+ # The maximum number of items to return (from 1 to 1024; the default is
2102
+ # 10).
2103
+ #
2104
+ # @option params [String] :neptune_iam_role_arn
2105
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
2106
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
2107
+ # group or an error will occur.
2108
+ #
2109
+ # @return [Types::ListMLModelTransformJobsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2110
+ #
2111
+ # * {Types::ListMLModelTransformJobsOutput#ids #ids} => Array&lt;String&gt;
2112
+ #
2113
+ # @example Request syntax with placeholder values
2114
+ #
2115
+ # resp = client.list_ml_model_transform_jobs({
2116
+ # max_items: 1,
2117
+ # neptune_iam_role_arn: "String",
2118
+ # })
2119
+ #
2120
+ # @example Response structure
2121
+ #
2122
+ # resp.ids #=> Array
2123
+ # resp.ids[0] #=> String
2124
+ #
2125
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListMLModelTransformJobs AWS API Documentation
2126
+ #
2127
+ # @overload list_ml_model_transform_jobs(params = {})
2128
+ # @param [Hash] params ({})
2129
def list_ml_model_transform_jobs(params = {}, options = {})
  # Build the operation request and dispatch it in one chained call.
  build_request(:list_ml_model_transform_jobs, params).send_request(options)
end
2133
+
2134
# Lists the openCypher queries that are currently active. See [Neptune
# openCypher status endpoint][1] for more information.
#
#
#
# [1]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-opencypher-status.html
#
# @option params [Boolean] :include_waiting
#   When set to `TRUE` and no other parameters are present, status
#   information is returned for waiting queries in addition to running
#   queries.
#
# @return [Types::ListOpenCypherQueriesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::ListOpenCypherQueriesOutput#accepted_query_count #accepted_query_count} => Integer
#   * {Types::ListOpenCypherQueriesOutput#running_query_count #running_query_count} => Integer
#   * {Types::ListOpenCypherQueriesOutput#queries #queries} => Array&lt;Types::GremlinQueryStatus&gt;
#
# @example Request syntax with placeholder values
#
#   resp = client.list_open_cypher_queries({
#     include_waiting: false,
#   })
#
# @example Response structure
#
#   resp.accepted_query_count #=> Integer
#   resp.running_query_count #=> Integer
#   resp.queries #=> Array
#   resp.queries[0].query_id #=> String
#   resp.queries[0].query_string #=> String
#   resp.queries[0].query_eval_stats.waited #=> Integer
#   resp.queries[0].query_eval_stats.elapsed #=> Integer
#   resp.queries[0].query_eval_stats.cancelled #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ListOpenCypherQueries AWS API Documentation
#
# @overload list_open_cypher_queries(params = {})
# @param [Hash] params ({})
def list_open_cypher_queries(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:list_open_cypher_queries, params).send_request(options)
end
2177
+
2178
# Controls the generation and use of property-graph statistics.
#
# @option params [String] :mode
#   The statistics generation mode: one of `DISABLE_AUTOCOMPUTE`,
#   `ENABLE_AUTOCOMPUTE`, or `REFRESH` (the last of which manually
#   triggers DFE statistics generation).
#
# @return [Types::ManagePropertygraphStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::ManagePropertygraphStatisticsOutput#status #status} => String
#   * {Types::ManagePropertygraphStatisticsOutput#payload #payload} => Types::RefreshStatisticsIdMap
#
# @example Request syntax with placeholder values
#
#   resp = client.manage_propertygraph_statistics({
#     mode: "disableAutoCompute", # accepts disableAutoCompute, enableAutoCompute, refresh
#   })
#
# @example Response structure
#
#   resp.status #=> String
#   resp.payload.statistics_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ManagePropertygraphStatistics AWS API Documentation
#
# @overload manage_propertygraph_statistics(params = {})
# @param [Hash] params ({})
def manage_propertygraph_statistics(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:manage_propertygraph_statistics, params).send_request(options)
end
2209
+
2210
# Controls the generation and use of RDF graph statistics.
#
# @option params [String] :mode
#   The statistics generation mode: one of `DISABLE_AUTOCOMPUTE`,
#   `ENABLE_AUTOCOMPUTE`, or `REFRESH` (the last of which manually
#   triggers DFE statistics generation).
#
# @return [Types::ManageSparqlStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::ManageSparqlStatisticsOutput#status #status} => String
#   * {Types::ManageSparqlStatisticsOutput#payload #payload} => Types::RefreshStatisticsIdMap
#
# @example Request syntax with placeholder values
#
#   resp = client.manage_sparql_statistics({
#     mode: "disableAutoCompute", # accepts disableAutoCompute, enableAutoCompute, refresh
#   })
#
# @example Response structure
#
#   resp.status #=> String
#   resp.payload.statistics_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/ManageSparqlStatistics AWS API Documentation
#
# @overload manage_sparql_statistics(params = {})
# @param [Hash] params ({})
def manage_sparql_statistics(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:manage_sparql_statistics, params).send_request(options)
end
2241
+
2242
# Starts a Neptune bulk loader job that loads data from an Amazon S3
# bucket into a Neptune DB instance. See [Using the Amazon Neptune Bulk
# Loader to Ingest Data][1].
#
#
#
# [1]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load.html
#
# @option params [required, String] :source
#   An S3 URI identifying a single file, multiple files, a folder, or
#   multiple folders. Neptune loads every data file in any folder that is
#   specified. The URI can be in any of the following formats:
#
#   * `s3://(bucket_name)/(object-key-name)`
#
#   * `https://s3.amazonaws.com/(bucket_name)/(object-key-name)`
#
#   * `https://s3.us-east-1.amazonaws.com/(bucket_name)/(object-key-name)`
#
#   The `object-key-name` element of the URI is equivalent to the
#   [prefix][2] parameter in an S3 [ListObjects][3] API call: it selects
#   all objects in the bucket whose names begin with that prefix, whether
#   that is a single file or folder or multiple files and/or folders. The
#   specified folder or folders can contain multiple vertex files and
#   multiple edge files.
#
#
#
#   [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html#API_ListObjects_RequestParameters
#   [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
#
# @option params [required, String] :format
#   The format of the data. See [Load Data Formats][4] for details.
#   Allowed values: `csv` (Gremlin CSV), `opencypher` (openCypher CSV),
#   `ntriples` (N-Triples RDF), `nquads` (N-Quads RDF), `rdfxml`
#   (RDF\XML RDF), and `turtle` (Turtle RDF).
#
#
#
#   [4]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-format.html
#
# @option params [required, String] :s3_bucket_region
#   The Amazon region of the S3 bucket. It must match the Amazon Region
#   of the DB cluster.
#
# @option params [required, String] :iam_role_arn
#   The ARN of an IAM role to be assumed by the Neptune DB instance for
#   access to the S3 bucket. The role must be attached to the DB cluster
#   (see [Adding the IAM Role to an Amazon Neptune Cluster][5]).
#
#
#
#   [5]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-IAM-add-role-cluster.html
#
# @option params [String] :mode
#   The load job mode. Allowed values: `RESUME`, `NEW`, `AUTO` (the
#   default).
#
#   * `RESUME` – resumes a previous load from this source if one is
#     found; otherwise the loader stops. Files already loaded
#     successfully are not reprocessed (only failed files are retried),
#     and data previously dropped from the cluster is not reloaded.
#
#   * `NEW` – creates a new load request regardless of any previous
#     loads, for example to reload everything after dropping data, or to
#     load new data available at the same source.
#
#   * `AUTO` – behaves like `RESUME` when a previous job from the same
#     source exists, and like `NEW` otherwise.
#
# @option params [Boolean] :fail_on_error
#   Toggles a complete stop on error. Allowed values `"TRUE"` (the
#   default) and `"FALSE"`. With `"FALSE"` the loader tries to load all
#   the data, skipping entries with errors; with `"TRUE"` it stops at the
#   first error, and data loaded up to that point persists.
#
# @option params [String] :parallelism
#   Optionally reduces the number of threads used by the bulk load
#   process. Allowed values: `LOW` (available vCPUs / 8), `MEDIUM`
#   (vCPUs / 2), `HIGH` (one thread per vCPU — the default), and
#   `OVERSUBSCRIBE` (vCPUs * 2, consuming all available resources; note
#   the load is I/O bound, so expect at most 60%-70% CPU utilization).
#   A high `parallelism` setting can occasionally deadlock when loading
#   openCypher data (`LOAD_DATA_DEADLOCK` error); lowering it and
#   retrying generally fixes the issue.
#
# @option params [Hash<String,String>] :parser_configuration
#   An optional object of additional parser configuration values, each
#   child of which is also optional:
#
#   * `namedGraphUri` – the default graph for all RDF formats when no
#     graph is specified (defaults to
#     `https://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph`).
#
#   * `baseUri` – the base URI for RDF/XML and Turtle formats (defaults
#     to `https://aws.amazon.com/neptune/default`).
#
#   * `allowEmptyStrings` – when `false` (the default), empty string
#     values ("") in CSV data are treated as nulls and not loaded; when
#     `true`, they are loaded as valid property values.
#
# @option params [Boolean] :update_single_cardinality_properties
#   Controls how the loader treats a new value for single-cardinality
#   vertex or edge properties (not supported for openCypher data).
#   Allowed values `"TRUE"` and `"FALSE"` (the default). With `"FALSE"` a
#   new value is an error because it violates single cardinality; with
#   `"TRUE"` the existing value is replaced by one of the new values
#   found in the source file(s) (which one is not guaranteed).
#
# @option params [Boolean] :queue_request
#   Optional flag indicating whether the load request can be queued.
#   Neptune can queue up to 64 jobs at a time when their `queueRequest`
#   parameters are all `"TRUE"`, so you need not wait for one job to
#   finish before issuing the next. When omitted or `"FALSE"` (the
#   default), the request fails if another load job is already running.
#
# @option params [Array<String>] :dependencies
#   Optionally makes a queued load request contingent on the successful
#   completion of one or more previously queued jobs (identified by
#   their load-ids). For example, to run `Job-C` only after independent
#   jobs `Job-A` and `Job-B` both finish, submit those two first, then
#   submit `Job-C` with their load-ids in its `dependencies` field. If
#   either dependency fails, the dependent job is not executed and its
#   status becomes `LOAD_FAILED_BECAUSE_DEPENDENCY_NOT_SATISFIED`.
#   Multiple levels of dependency can be set up this way, so one failure
#   cancels all directly or indirectly dependent requests.
#
# @option params [Boolean] :user_provided_edge_ids
#   Required only when loading openCypher data that contains
#   relationship IDs. Set it to `True` (and include an `:ID` column in
#   every relationship file) when relationship IDs are explicitly
#   provided in the load data (recommended). When present and set to
#   `False`, relationship files **must not** contain an `:ID` column and
#   Neptune generates relationship IDs automatically. Explicit IDs let
#   the loader resume after CSV errors are fixed without reloading
#   already-loaded relationships; without them, a failed load must
#   reload all relationships if any relationship file was corrected.
#
# @return [Types::StartLoaderJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::StartLoaderJobOutput#status #status} => String
#   * {Types::StartLoaderJobOutput#payload #payload} => Hash&lt;String,String&gt;
#
# @example Request syntax with placeholder values
#
#   resp = client.start_loader_job({
#     source: "String", # required
#     format: "csv", # required, accepts csv, opencypher, ntriples, nquads, rdfxml, turtle
#     s3_bucket_region: "us-east-1", # required, accepts us-east-1, us-east-2, us-west-1, us-west-2, ca-central-1, sa-east-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, eu-central-1, me-south-1, af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, cn-north-1, cn-northwest-1, us-gov-west-1, us-gov-east-1
#     iam_role_arn: "String", # required
#     mode: "RESUME", # accepts RESUME, NEW, AUTO
#     fail_on_error: false,
#     parallelism: "LOW", # accepts LOW, MEDIUM, HIGH, OVERSUBSCRIBE
#     parser_configuration: {
#       "String" => "String",
#     },
#     update_single_cardinality_properties: false,
#     queue_request: false,
#     dependencies: ["String"],
#     user_provided_edge_ids: false,
#   })
#
# @example Response structure
#
#   resp.status #=> String
#   resp.payload #=> Hash
#   resp.payload["String"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/StartLoaderJob AWS API Documentation
#
# @overload start_loader_job(params = {})
# @param [Hash] params ({})
def start_loader_job(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:start_loader_job, params).send_request(options)
end
2553
+
2554
# Starts a Neptune ML data processing job that processes graph data
# exported from Neptune in preparation for training. See [The
# `dataprocessing` command][1].
#
#
#
# [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html
#
# @option params [String] :id
#   A unique identifier for the new job. The default is an autogenerated
#   UUID.
#
# @option params [String] :previous_data_processing_job_id
#   The job ID of a completed data processing job run on an earlier
#   version of the data.
#
# @option params [required, String] :input_data_s3_location
#   The URI of the Amazon S3 location from which SageMaker downloads the
#   data needed to run the data processing job.
#
# @option params [required, String] :processed_data_s3_location
#   The URI of the Amazon S3 location where SageMaker saves the results
#   of the data processing job.
#
# @option params [String] :sagemaker_iam_role_arn
#   The ARN of an IAM role for SageMaker execution. It must be listed in
#   your DB cluster parameter group, or an error will occur.
#
# @option params [String] :neptune_iam_role_arn
#   The Amazon Resource Name (ARN) of an IAM role that SageMaker can
#   assume to perform tasks on your behalf. It must be listed in your DB
#   cluster parameter group, or an error will occur.
#
# @option params [String] :processing_instance_type
#   The type of ML instance used during data processing. Its memory
#   should be large enough to hold the processed dataset. The default is
#   the smallest ml.r5 type whose memory is ten times larger than the
#   size of the exported graph data on disk.
#
# @option params [Integer] :processing_instance_volume_size_in_gb
#   The disk volume size of the processing instance. Both input data and
#   processed data are stored on disk, so the volume must be large
#   enough for both data sets. The default is 0, in which case (or when
#   unspecified) Neptune ML chooses the volume size automatically based
#   on the data size.
#
# @option params [Integer] :processing_time_out_in_seconds
#   Timeout in seconds for the data processing job. The default is
#   86,400 (1 day).
#
# @option params [String] :model_type
#   One of the two model types Neptune ML currently supports:
#   heterogeneous graph models (`heterogeneous`) and knowledge graph
#   (`kge`). The default is none, in which case Neptune ML chooses the
#   model type automatically based on the data.
#
# @option params [String] :config_file_name
#   A data specification file describing how to load the exported graph
#   data for training, automatically generated by the Neptune export
#   toolkit. The default is `training-data-configuration.json`.
#
# @option params [Array<String>] :subnets
#   The IDs of the subnets in the Neptune VPC. The default is None.
#
# @option params [Array<String>] :security_group_ids
#   The VPC security group IDs. The default is None.
#
# @option params [String] :volume_encryption_kms_key
#   The Amazon Key Management Service (Amazon KMS) key that SageMaker
#   uses to encrypt data on the storage volume attached to the ML
#   compute instances that run the training job. The default is None.
#
# @option params [String] :s3_output_encryption_kms_key
#   The Amazon Key Management Service (Amazon KMS) key that SageMaker
#   uses to encrypt the output of the processing job. The default is
#   none.
#
# @return [Types::StartMLDataProcessingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::StartMLDataProcessingJobOutput#id #id} => String
#   * {Types::StartMLDataProcessingJobOutput#arn #arn} => String
#   * {Types::StartMLDataProcessingJobOutput#creation_time_in_millis #creation_time_in_millis} => Integer
#
# @example Request syntax with placeholder values
#
#   resp = client.start_ml_data_processing_job({
#     id: "String",
#     previous_data_processing_job_id: "String",
#     input_data_s3_location: "String", # required
#     processed_data_s3_location: "String", # required
#     sagemaker_iam_role_arn: "String",
#     neptune_iam_role_arn: "String",
#     processing_instance_type: "String",
#     processing_instance_volume_size_in_gb: 1,
#     processing_time_out_in_seconds: 1,
#     model_type: "String",
#     config_file_name: "String",
#     subnets: ["String"],
#     security_group_ids: ["String"],
#     volume_encryption_kms_key: "String",
#     s3_output_encryption_kms_key: "String",
#   })
#
# @example Response structure
#
#   resp.id #=> String
#   resp.arn #=> String
#   resp.creation_time_in_millis #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/StartMLDataProcessingJob AWS API Documentation
#
# @overload start_ml_data_processing_job(params = {})
# @param [Hash] params ({})
def start_ml_data_processing_job(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:start_ml_data_processing_job, params).send_request(options)
end
2671
+
2672
# Starts a new Neptune ML model training job. See [Model training using
# the `modeltraining` command][1].
#
#
#
# [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-modeltraining.html
#
# @option params [String] :id
#   A unique identifier for the new job. The default is an autogenerated
#   UUID.
#
# @option params [String] :previous_model_training_job_id
#   The job ID of a completed model-training job that you want to update
#   incrementally based on updated data.
#
# @option params [required, String] :data_processing_job_id
#   The job ID of the completed data-processing job that produced the
#   data the training will work with.
#
# @option params [required, String] :train_model_s3_location
#   The location in Amazon S3 where the model artifacts are to be
#   stored.
#
# @option params [String] :sagemaker_iam_role_arn
#   The ARN of an IAM role for SageMaker execution. It must be listed in
#   your DB cluster parameter group, or an error will occur.
#
# @option params [String] :neptune_iam_role_arn
#   The ARN of an IAM role granting Neptune access to SageMaker and
#   Amazon S3 resources. It must be listed in your DB cluster parameter
#   group, or an error will occur.
#
# @option params [String] :base_processing_instance_type
#   The type of ML instance used in preparing and managing training of
#   ML models — a CPU instance chosen based on memory requirements for
#   processing the training data and model.
#
# @option params [String] :training_instance_type
#   The type of ML instance used for model training. All Neptune ML
#   models support CPU, GPU, and multiGPU training. The default is
#   `ml.p3.2xlarge`. The right instance type depends on the task type,
#   graph size, and your budget.
#
# @option params [Integer] :training_instance_volume_size_in_gb
#   The disk volume size of the training instance. Both input data and
#   the output model are stored on disk, so the volume must be large
#   enough for both data sets. The default is 0, in which case (or when
#   unspecified) Neptune ML selects a disk volume size based on the
#   recommendation generated in the data processing step.
#
# @option params [Integer] :training_time_out_in_seconds
#   Timeout in seconds for the training job. The default is 86,400 (1
#   day).
#
# @option params [Integer] :max_hpo_number_of_training_jobs
#   Maximum total number of training jobs to start for the
#   hyperparameter tuning job (default 2). Neptune ML automatically
#   tunes the model's hyperparameters; to obtain a model that performs
#   well, use at least 10 jobs (i.e., set `maxHPONumberOfTrainingJobs`
#   to 10). In general, more tuning runs give better results.
#
# @option params [Integer] :max_hpo_parallel_training_jobs
#   Maximum number of parallel training jobs to start for the
#   hyperparameter tuning job (default 2), limited by the available
#   resources on your training instance.
#
# @option params [Array<String>] :subnets
#   The IDs of the subnets in the Neptune VPC. The default is None.
#
# @option params [Array<String>] :security_group_ids
#   The VPC security group IDs. The default is None.
#
# @option params [String] :volume_encryption_kms_key
#   The Amazon Key Management Service (KMS) key that SageMaker uses to
#   encrypt data on the storage volume attached to the ML compute
#   instances that run the training job. The default is None.
#
# @option params [String] :s3_output_encryption_kms_key
#   The Amazon Key Management Service (KMS) key that SageMaker uses to
#   encrypt the output of the processing job. The default is none.
#
# @option params [Boolean] :enable_managed_spot_training
#   Optimizes the cost of training machine-learning models by using
#   Amazon Elastic Compute Cloud spot instances. The default is `False`.
#
# @option params [Types::CustomModelTrainingParameters] :custom_model_training_parameters
#   The configuration for custom model training. This is a JSON object.
#
# @return [Types::StartMLModelTrainingJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
#   * {Types::StartMLModelTrainingJobOutput#id #id} => String
#   * {Types::StartMLModelTrainingJobOutput#arn #arn} => String
#   * {Types::StartMLModelTrainingJobOutput#creation_time_in_millis #creation_time_in_millis} => Integer
#
# @example Request syntax with placeholder values
#
#   resp = client.start_ml_model_training_job({
#     id: "String",
#     previous_model_training_job_id: "String",
#     data_processing_job_id: "String", # required
#     train_model_s3_location: "String", # required
#     sagemaker_iam_role_arn: "String",
#     neptune_iam_role_arn: "String",
#     base_processing_instance_type: "String",
#     training_instance_type: "String",
#     training_instance_volume_size_in_gb: 1,
#     training_time_out_in_seconds: 1,
#     max_hpo_number_of_training_jobs: 1,
#     max_hpo_parallel_training_jobs: 1,
#     subnets: ["String"],
#     security_group_ids: ["String"],
#     volume_encryption_kms_key: "String",
#     s3_output_encryption_kms_key: "String",
#     enable_managed_spot_training: false,
#     custom_model_training_parameters: {
#       source_s3_directory_path: "String", # required
#       training_entry_point_script: "String",
#       transform_entry_point_script: "String",
#     },
#   })
#
# @example Response structure
#
#   resp.id #=> String
#   resp.arn #=> String
#   resp.creation_time_in_millis #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/StartMLModelTrainingJob AWS API Documentation
#
# @overload start_ml_model_training_job(params = {})
# @param [Hash] params ({})
def start_ml_model_training_job(params = {}, options = {})
  # Build the operation request and dispatch it in a single expression.
  build_request(:start_ml_model_training_job, params).send_request(options)
end
2808
+
2809
+ # Creates a new model transform job. See [Use a trained model to
2810
+ # generate new model artifacts][1].
2811
+ #
2812
+ #
2813
+ #
2814
+ # [1]: https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-model-transform.html
2815
+ #
2816
+ # @option params [String] :id
2817
+ # A unique identifier for the new job. The default is an autogenerated
2818
+ # UUID.
2819
+ #
2820
+ # @option params [String] :data_processing_job_id
2821
+ # The job ID of a completed data-processing job. You must include either
2822
+ # `dataProcessingJobId` and a `mlModelTrainingJobId`, or a
2823
+ # `trainingJobName`.
2824
+ #
2825
+ # @option params [String] :ml_model_training_job_id
2826
+ # The job ID of a completed model-training job. You must include either
2827
+ # `dataProcessingJobId` and a `mlModelTrainingJobId`, or a
2828
+ # `trainingJobName`.
2829
+ #
2830
+ # @option params [String] :training_job_name
2831
+ # The name of a completed SageMaker training job. You must include
2832
+ # either `dataProcessingJobId` and a `mlModelTrainingJobId`, or a
2833
+ # `trainingJobName`.
2834
+ #
2835
+ # @option params [required, String] :model_transform_output_s3_location
2836
+ # The location in Amazon S3 where the model artifacts are to be stored.
2837
+ #
2838
+ # @option params [String] :sagemaker_iam_role_arn
2839
+ # The ARN of an IAM role for SageMaker execution. This must be listed in
2840
+ # your DB cluster parameter group or an error will occur.
2841
+ #
2842
+ # @option params [String] :neptune_iam_role_arn
2843
+ # The ARN of an IAM role that provides Neptune access to SageMaker and
2844
+ # Amazon S3 resources. This must be listed in your DB cluster parameter
2845
+ # group or an error will occur.
2846
+ #
2847
+ # @option params [Types::CustomModelTransformParameters] :custom_model_transform_parameters
2848
+ # Configuration information for a model transform using a custom model.
2849
+ # The `customModelTransformParameters` object must contain fields whose
+ # values are compatible with the saved model parameters from the
+ # training job.
2852
+ #
2853
+ # @option params [String] :base_processing_instance_type
2854
+ # The type of ML instance used in preparing and managing training of ML
2855
+ # models. This is an ML compute instance chosen based on memory
2856
+ # requirements for processing the training data and model.
2857
+ #
2858
+ # @option params [Integer] :base_processing_instance_volume_size_in_gb
2859
+ # The disk volume size of the training instance in gigabytes. The
2860
+ # default is 0. Both input data and the output model are stored on disk,
2861
+ # so the volume size must be large enough to hold both data sets. If not
2862
+ # specified or 0, Neptune ML selects a disk volume size based on the
2863
+ # recommendation generated in the data processing step.
2864
+ #
2865
+ # @option params [Array<String>] :subnets
2866
+ # The IDs of the subnets in the Neptune VPC. The default is None.
2867
+ #
2868
+ # @option params [Array<String>] :security_group_ids
2869
+ # The VPC security group IDs. The default is None.
2870
+ #
2871
+ # @option params [String] :volume_encryption_kms_key
2872
+ # The Amazon Key Management Service (KMS) key that SageMaker uses to
2873
+ # encrypt data on the storage volume attached to the ML compute
2874
+ # instances that run the training job. The default is None.
2875
+ #
2876
+ # @option params [String] :s3_output_encryption_kms_key
2877
+ # The Amazon Key Management Service (KMS) key that SageMaker uses to
2878
+ # encrypt the output of the processing job. The default is none.
2879
+ #
2880
+ # @return [Types::StartMLModelTransformJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2881
+ #
2882
+ # * {Types::StartMLModelTransformJobOutput#id #id} => String
2883
+ # * {Types::StartMLModelTransformJobOutput#arn #arn} => String
2884
+ # * {Types::StartMLModelTransformJobOutput#creation_time_in_millis #creation_time_in_millis} => Integer
2885
+ #
2886
+ # @example Request syntax with placeholder values
2887
+ #
2888
+ # resp = client.start_ml_model_transform_job({
2889
+ # id: "String",
2890
+ # data_processing_job_id: "String",
2891
+ # ml_model_training_job_id: "String",
2892
+ # training_job_name: "String",
2893
+ # model_transform_output_s3_location: "String", # required
2894
+ # sagemaker_iam_role_arn: "String",
2895
+ # neptune_iam_role_arn: "String",
2896
+ # custom_model_transform_parameters: {
2897
+ # source_s3_directory_path: "String", # required
2898
+ # transform_entry_point_script: "String",
2899
+ # },
2900
+ # base_processing_instance_type: "String",
2901
+ # base_processing_instance_volume_size_in_gb: 1,
2902
+ # subnets: ["String"],
2903
+ # security_group_ids: ["String"],
2904
+ # volume_encryption_kms_key: "String",
2905
+ # s3_output_encryption_kms_key: "String",
2906
+ # })
2907
+ #
2908
+ # @example Response structure
2909
+ #
2910
+ # resp.id #=> String
2911
+ # resp.arn #=> String
2912
+ # resp.creation_time_in_millis #=> Integer
2913
+ #
2914
+ # @see http://docs.aws.amazon.com/goto/WebAPI/neptunedata-2023-08-01/StartMLModelTransformJob AWS API Documentation
2915
+ #
2916
+ # @overload start_ml_model_transform_job(params = {})
2917
+ # @param [Hash] params ({})
2918
+ def start_ml_model_transform_job(params = {}, options = {})
2919
+ req = build_request(:start_ml_model_transform_job, params)
2920
+ req.send_request(options)
2921
+ end
2922
+
2923
+ # @!endgroup
2924
+
2925
+ # @param params ({})
2926
+ # @api private
2927
+ def build_request(operation_name, params = {})
2928
+ handlers = @handlers.for(operation_name)
2929
+ context = Seahorse::Client::RequestContext.new(
2930
+ operation_name: operation_name,
2931
+ operation: config.api.operation(operation_name),
2932
+ client: self,
2933
+ params: params,
2934
+ config: config)
2935
+ context[:gem_name] = 'aws-sdk-neptunedata'
2936
+ context[:gem_version] = '1.0.0'
2937
+ Seahorse::Client::Request.new(handlers, context)
2938
+ end
2939
+
2940
    # @api private
    # @deprecated This service defines no waiters, so the list is always
    #   empty; retained only for interface compatibility with other SDK
    #   clients.
    def waiter_names
      []
    end
2945
+
2946
    class << self

      # @api private
      # Service identifier symbol (set by the SDK's plugin machinery).
      attr_reader :identifier

      # @api private
      # Returns the module namespacing this service's error classes.
      def errors_module
        Errors
      end

    end
2957
+ end
2958
+ end