google-cloud-bigquery 1.19.0 → 1.22.0

@@ -348,7 +348,71 @@ module Google
  end
 
  ###
- # Checks if the destination table will be time-partitioned. See
+ # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
+ #
+ # @!group Attributes
+ #
+ def range_partitioning?
+   !@gapi.configuration.load.range_partitioning.nil?
+ end
+
+ ###
+ # The field on which the destination table will be range partitioned, if any. The field must be a
+ # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
+ # [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_field
+   @gapi.configuration.load.range_partitioning.field if range_partitioning?
+ end
+
+ ###
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_start
+   @gapi.configuration.load.range_partitioning.range.start if range_partitioning?
+ end
+
+ ###
+ # The width of each interval. See [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
+ #   partitioned.
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_interval
+   return nil unless range_partitioning?
+   @gapi.configuration.load.range_partitioning.range.interval
+ end
+
+ ###
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_end
+   @gapi.configuration.load.range_partitioning.range.end if range_partitioning?
+ end
+
+ ###
+ # Checks if the destination table will be time partitioned. See
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
  # @return [Boolean, nil] `true` when the table will be time-partitioned,
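
The new readers mirror the setters added further down in this release, so a configured load job can be inspected directly. A minimal sketch, assuming a dataset `my_dataset` and a CSV at `gs://my-bucket/file-name.csv` (placeholders, matching the library's own `@example` blocks):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    load_job = dataset.load_job "my_new_table", "gs://my-bucket/file-name.csv" do |job|
      job.schema { |schema| schema.integer "my_table_id", mode: :required }
      job.range_partitioning_field    = "my_table_id"
      job.range_partitioning_start    = 0
      job.range_partitioning_interval = 10
      job.range_partitioning_end      = 100
    end

    # Read the configuration back with the new attribute readers.
    load_job.range_partitioning?         #=> true
    load_job.range_partitioning_field    #=> "my_table_id"
    load_job.range_partitioning_start    #=> 0
    load_job.range_partitioning_interval #=> 10
    load_job.range_partitioning_end      #=> 100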
@@ -361,10 +425,10 @@ module Google
  end
 
  ###
- # The period for which the destination table will be partitioned, if
+ # The period for which the destination table will be time partitioned, if
  # any. See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # @return [String, nil] The partition type. Currently the only supported
+ # @return [String, nil] The time partition type. Currently the only supported
  #   value is "DAY", or `nil` if not present.
  #
  # @!group Attributes
@@ -374,13 +438,13 @@ module Google
  end
 
  ###
- # The field on which the destination table will be partitioned, if any.
- # If not set, the destination table will be partitioned by pseudo column
- # `_PARTITIONTIME`; if set, the table will be partitioned by this field.
+ # The field on which the destination table will be time partitioned, if any.
+ # If not set, the destination table will be time partitioned by pseudo column
+ # `_PARTITIONTIME`; if set, the table will be time partitioned by this field.
  # See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # @return [String, nil] The partition field, if a field was configured.
- #   `nil` if not partitioned or not set (partitioned by pseudo column
+ # @return [String, nil] The time partition field, if a field was configured.
+ #   `nil` if not time partitioned or not set (partitioned by pseudo column
  #   '_PARTITIONTIME').
  #
  # @!group Attributes
@@ -390,12 +454,12 @@ module Google
  end
 
  ###
- # The expiration for the destination table partitions, if any, in
+ # The expiration for the destination table time partitions, if any, in
  # seconds. See [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
  # @return [Integer, nil] The expiration time, in seconds, for data in
- #   partitions, or `nil` if not present.
+ #   time partitions, or `nil` if not present.
  #
  # @!group Attributes
  #
@@ -408,11 +472,11 @@ module Google
 
  ###
  # If set to true, queries over the destination table will require a
- # partition filter that can be used for partition elimination to be
+ # time partition filter that can be used for partition elimination to be
  # specified. See [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # @return [Boolean] `true` when a partition filter will be required,
+ # @return [Boolean] `true` when a time partition filter will be required,
  #   or `false` otherwise.
  #
  # @!group Attributes
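
The renamed time partitioning readers pair with the existing `time_partitioning_*=` setters in the same way. A minimal sketch with placeholder names (the `dob` TIMESTAMP field is illustrative):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    load_job = dataset.load_job "my_new_table", "gs://my-bucket/file-name.csv" do |job|
      job.schema { |schema| schema.timestamp "dob", mode: :required }
      job.time_partitioning_type           = "DAY"
      job.time_partitioning_field          = "dob"
      job.time_partitioning_expiration     = 86_400 # one day, in seconds
      job.time_partitioning_require_filter = true
    end

    load_job.time_partitioning?                #=> true
    load_job.time_partitioning_type            #=> "DAY"
    load_job.time_partitioning_field           #=> "dob"
    load_job.time_partitioning_expiration      #=> 86400
    load_job.time_partitioning_require_filter? #=> true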
@@ -1239,12 +1303,21 @@ module Google
  # Sets the labels to use for the load job.
  #
  # @param [Hash] val A hash of user-provided labels associated with
- #   the job. You can use these to organize and group your jobs. Label
- #   keys and values can be no longer than 63 characters, can only
- #   contain lowercase letters, numeric characters, underscores and
- #   dashes. International characters are allowed. Label values are
- #   optional. Label keys must start with a letter and each label in
- #   the list must have a different key.
+ #   the job. You can use these to organize and group your jobs.
+ #
+ #   The labels applied to a resource must meet the following requirements:
+ #
+ #   * Each resource can have multiple labels, up to a maximum of 64.
+ #   * Each label must be a key-value pair.
+ #   * Keys have a minimum length of 1 character and a maximum length of
+ #     63 characters, and cannot be empty. Values can be empty, and have
+ #     a maximum length of 63 characters.
+ #   * Keys and values can contain only lowercase letters, numeric characters,
+ #     underscores, and dashes. All characters must use UTF-8 encoding, and
+ #     international characters are allowed.
+ #   * The key portion of a label must be unique. However, you can use the
+ #     same key with multiple resources.
+ #   * Keys must start with a lowercase letter or international character.
  #
  # @!group Attributes
  #
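
In practice the requirements above permit hashes like the following (a sketch with placeholder names; note the empty value, which is allowed, while an empty key is not):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    load_job = dataset.load_job "my_new_table", "gs://my-bucket/file-name.csv" do |job|
      # Keys: 1-63 chars, lowercase letters/digits/underscores/dashes,
      # starting with a lowercase letter or international character.
      # Values: 0-63 chars from the same character set.
      job.labels = { "env" => "production", "team" => "data-eng", "note" => "" }
    end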
@@ -1253,14 +1326,180 @@ module Google
  end
 
  ##
- # Sets the partitioning for the destination table. See [Partitioned
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
+ #
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+ # partitioning on an existing table.
+ #
+ # @param [String] field The range partition field. The destination table is partitioned by this
+ #   field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
+ #   type is `INTEGER/INT64`.
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #
+ #   gs_url = "gs://my-bucket/file-name.csv"
+ #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+ #     job.schema do |schema|
+ #       schema.integer "my_table_id", mode: :required
+ #       schema.string "my_table_data", mode: :required
+ #     end
+ #     job.range_partitioning_field = "my_table_id"
+ #     job.range_partitioning_start = 0
+ #     job.range_partitioning_interval = 10
+ #     job.range_partitioning_end = 100
+ #   end
+ #
+ #   load_job.wait_until_done!
+ #   load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_field= field
+   @gapi.configuration.load.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+     range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+   )
+   @gapi.configuration.load.range_partitioning.field = field
+ end
+
+ ##
+ # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+ # partitioning on an existing table.
+ #
+ # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
+ #
+ # @param [Integer] range_start The start of range partitioning, inclusive.
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #
+ #   gs_url = "gs://my-bucket/file-name.csv"
+ #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+ #     job.schema do |schema|
+ #       schema.integer "my_table_id", mode: :required
+ #       schema.string "my_table_data", mode: :required
+ #     end
+ #     job.range_partitioning_field = "my_table_id"
+ #     job.range_partitioning_start = 0
+ #     job.range_partitioning_interval = 10
+ #     job.range_partitioning_end = 100
+ #   end
+ #
+ #   load_job.wait_until_done!
+ #   load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_start= range_start
+   @gapi.configuration.load.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+     range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+   )
+   @gapi.configuration.load.range_partitioning.range.start = range_start
+ end
+
+ ##
+ # Sets the width of each interval for data in range partitions. See [Creating and using integer range
+ # partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+ # partitioning on an existing table.
+ #
+ # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
+ #
+ # @param [Integer] range_interval The width of each interval, for data in partitions.
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #
+ #   gs_url = "gs://my-bucket/file-name.csv"
+ #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+ #     job.schema do |schema|
+ #       schema.integer "my_table_id", mode: :required
+ #       schema.string "my_table_data", mode: :required
+ #     end
+ #     job.range_partitioning_field = "my_table_id"
+ #     job.range_partitioning_start = 0
+ #     job.range_partitioning_interval = 10
+ #     job.range_partitioning_end = 100
+ #   end
+ #
+ #   load_job.wait_until_done!
+ #   load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_interval= range_interval
+   @gapi.configuration.load.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+     range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+   )
+   @gapi.configuration.load.range_partitioning.range.interval = range_interval
+ end
+
+ ##
+ # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+ #
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+ # partitioning on an existing table.
+ #
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
+ #
+ # @param [Integer] range_end The end of range partitioning, exclusive.
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #
+ #   gs_url = "gs://my-bucket/file-name.csv"
+ #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+ #     job.schema do |schema|
+ #       schema.integer "my_table_id", mode: :required
+ #       schema.string "my_table_data", mode: :required
+ #     end
+ #     job.range_partitioning_field = "my_table_id"
+ #     job.range_partitioning_start = 0
+ #     job.range_partitioning_interval = 10
+ #     job.range_partitioning_end = 100
+ #   end
+ #
+ #   load_job.wait_until_done!
+ #   load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def range_partitioning_end= range_end
+   @gapi.configuration.load.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+     range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+   )
+   @gapi.configuration.load.range_partitioning.range.end = range_end
+ end
+
+ ##
+ # Sets the time partitioning for the destination table. See [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # You can only set the partitioning field while creating a table.
+ # You can only set the time partitioning field while creating a table.
  # BigQuery does not allow you to change partitioning on an existing
  # table.
  #
- # @param [String] type The partition type. Currently the only
+ # @param [String] type The time partition type. Currently the only
  #   supported value is "DAY".
  #
  # @example
@@ -1285,20 +1524,20 @@ module Google
  end
 
  ##
- # Sets the field on which to partition the destination table. If not
- # set, the destination table is partitioned by pseudo column
- # `_PARTITIONTIME`; if set, the table is partitioned by this field.
+ # Sets the field on which to time partition the destination table. If not
+ # set, the destination table is time partitioned by pseudo column
+ # `_PARTITIONTIME`; if set, the table is time partitioned by this field.
  # See [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # The destination table must also be partitioned. See
+ # The destination table must also be time partitioned. See
  # {#time_partitioning_type=}.
  #
- # You can only set the partitioning field while creating a table.
+ # You can only set the time partitioning field while creating a table.
  # BigQuery does not allow you to change partitioning on an existing
  # table.
  #
- # @param [String] field The partition field. The field must be a
+ # @param [String] field The time partition field. The field must be a
  #   top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
  #   REQUIRED.
  #
@@ -1328,15 +1567,15 @@ module Google
  end
 
  ##
- # Sets the partition expiration for the destination table. See
+ # Sets the time partition expiration for the destination table. See
  # [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # The destination table must also be partitioned. See
+ # The destination table must also be time partitioned. See
  # {#time_partitioning_type=}.
  #
  # @param [Integer] expiration An expiration time, in seconds,
- #   for data in partitions.
+ #   for data in time partitions.
  #
  # @example
  #   require "google/cloud/bigquery"
@@ -1362,12 +1601,12 @@ module Google
 
  ##
  # If set to true, queries over the destination table will require a
- # partition filter that can be used for partition elimination to be
+ # time partition filter that can be used for time partition elimination to be
  # specified. See [Partitioned
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
  # @param [Boolean] val Indicates if queries over the destination table
- #   will require a partition filter. The default value is `false`.
+ #   will require a time partition filter. The default value is `false`.
  #
  # @!group Attributes
  #
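
All four `range_partitioning_*=` setters share the same lazy `||=` initialization, so they may be called in any order within the block. A standalone sketch of that pattern (`PartConfig` and `PartRange` are illustrative stand-ins, not the `Google::Apis::BigqueryV2` classes):

    # Illustrative stand-ins for the gapi config objects.
    PartRange  = Struct.new(:start, :interval, :end, keyword_init: true)
    PartConfig = Struct.new(:field, :range, keyword_init: true)

    class RangePartitionUpdater
      def field=(value)    config.field          = value end
      def start=(value)    config.range.start    = value end
      def interval=(value) config.range.interval = value end
      def end=(value)      config.range.end      = value end

      def to_h
        config.to_h.merge(range: config.range.to_h)
      end

      private

      # ||= builds the shared config on first use, so the setters
      # above can run in any order.
      def config
        @config ||= PartConfig.new(range: PartRange.new)
      end
    end

    u = RangePartitionUpdater.new
    u.end      = 100 # calling the end setter first still initializes the config
    u.field    = "my_table_id"
    u.start    = 0
    u.interval = 10
    u.to_h #=> {:field=>"my_table_id", :range=>{:start=>0, :interval=>10, :end=>100}}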
@@ -341,14 +341,19 @@ module Google
  # the update to comply with ETag-based optimistic concurrency control.
  #
  # @param [Hash<String, String>] new_labels A hash containing key/value
- #   pairs.
- #
- #   * Label keys and values can be no longer than 63 characters.
- #   * Label keys and values can contain only lowercase letters, numbers,
- #     underscores, hyphens, and international characters.
- #   * Label keys and values cannot exceed 128 bytes in size.
- #   * Label keys must begin with a letter.
- #   * Label keys must be unique within a model.
+ #   pairs. The labels applied to a resource must meet the following requirements:
+ #
+ #   * Each resource can have multiple labels, up to a maximum of 64.
+ #   * Each label must be a key-value pair.
+ #   * Keys have a minimum length of 1 character and a maximum length of
+ #     63 characters, and cannot be empty. Values can be empty, and have
+ #     a maximum length of 63 characters.
+ #   * Keys and values can contain only lowercase letters, numeric characters,
+ #     underscores, and dashes. All characters must use UTF-8 encoding, and
+ #     international characters are allowed.
+ #   * The key portion of a label must be unique. However, you can use the
+ #     same key with multiple resources.
+ #   * Keys must start with a lowercase letter or international character.
  #
  # @example
  #   require "google/cloud/bigquery"
@@ -482,6 +487,146 @@ module Google
  Array @gapi_json[:trainingRuns]
  end
 
+ ##
+ # Exports the model to Google Cloud Storage asynchronously, immediately
+ # returning an {ExtractJob} that can be used to track the progress of the
+ # export job. The caller may poll the service by repeatedly calling
+ # {Job#reload!} and {Job#done?} to detect when the job is done, or
+ # simply block until the job is done by calling {Job#wait_until_done!}.
+ # See also {#extract}.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {ExtractJob::Updater#location=} in a block passed to this method. If
+ # the model is a full resource representation (see {#resource_full?}),
+ # the location of the job will automatically be set to the location of
+ # the model.
+ #
+ # @see https://cloud.google.com/bigquery-ml/docs/exporting-models
+ #   Exporting models
+ #
+ # @param [String] extract_url The Google Storage URI to which BigQuery
+ #   should extract the model. This value should end in an object name
+ #   prefix, since multiple objects will be exported.
+ # @param [String] format The exported file format. The default value is
+ #   `ml_tf_saved_model`.
+ #
+ #   The following values are supported:
+ #
+ #   * `ml_tf_saved_model` - TensorFlow SavedModel
+ #   * `ml_xgboost_booster` - XGBoost Booster
+ # @param [String] job_id A user-defined ID for the extract job. The ID
+ #   must contain only letters (a-z, A-Z), numbers (0-9), underscores
+ #   (_), or dashes (-). The maximum length is 1,024 characters. If
+ #   `job_id` is provided, then `prefix` will not be used.
+ #
+ #   See [Generating a job
+ #   ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
+ # @param [String] prefix A string, usually human-readable, that will be
+ #   prepended to a generated value to produce a unique job ID. For
+ #   example, the prefix `daily_import_job_` can be given to generate a
+ #   job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
+ #   prefix must contain only letters (a-z, A-Z), numbers (0-9),
+ #   underscores (_), or dashes (-). The maximum length of the entire ID
+ #   is 1,024 characters. If `job_id` is provided, then `prefix` will not
+ #   be used.
+ # @param [Hash] labels A hash of user-provided labels associated with
+ #   the job. You can use these to organize and group your jobs.
+ #
+ #   The labels applied to a resource must meet the following requirements:
+ #
+ #   * Each resource can have multiple labels, up to a maximum of 64.
+ #   * Each label must be a key-value pair.
+ #   * Keys have a minimum length of 1 character and a maximum length of
+ #     63 characters, and cannot be empty. Values can be empty, and have
+ #     a maximum length of 63 characters.
+ #   * Keys and values can contain only lowercase letters, numeric characters,
+ #     underscores, and dashes. All characters must use UTF-8 encoding, and
+ #     international characters are allowed.
+ #   * The key portion of a label must be unique. However, you can use the
+ #     same key with multiple resources.
+ #   * Keys must start with a lowercase letter or international character.
+ #
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
+ #   configuration object for setting additional options.
+ #
+ # @return [Google::Cloud::Bigquery::ExtractJob]
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #   model = dataset.model "my_model"
+ #
+ #   extract_job = model.extract_job "gs://my-bucket/#{model.model_id}"
+ #
+ #   extract_job.wait_until_done!
+ #   extract_job.done? #=> true
+ #
+ # @!group Data
+ #
+ def extract_job extract_url, format: nil, job_id: nil, prefix: nil, labels: nil
+   ensure_service!
+   options = { format: format, job_id: job_id, prefix: prefix, labels: labels }
+   updater = ExtractJob::Updater.from_options service, model_ref, extract_url, options
+   updater.location = location if location # may be model reference
+
+   yield updater if block_given?
+
+   job_gapi = updater.to_gapi
+   gapi = service.extract_table job_gapi
+   Job.from_gapi gapi, service
+ end
+
+ ##
+ # Exports the model to Google Cloud Storage using a synchronous method
+ # that blocks for a response. Timeouts and transient errors are generally
+ # handled as needed to complete the job. See also {#extract_job}.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {ExtractJob::Updater#location=} in a block passed to this method. If
+ # the model is a full resource representation (see {#resource_full?}),
+ # the location of the job will automatically be set to the location of
+ # the model.
+ #
+ # @see https://cloud.google.com/bigquery-ml/docs/exporting-models
+ #   Exporting models
+ #
+ # @param [String] extract_url The Google Storage URI to which BigQuery
+ #   should extract the model. This value should end in an object name
+ #   prefix, since multiple objects will be exported.
+ # @param [String] format The exported file format. The default value is
+ #   `ml_tf_saved_model`.
+ #
+ #   The following values are supported:
+ #
+ #   * `ml_tf_saved_model` - TensorFlow SavedModel
+ #   * `ml_xgboost_booster` - XGBoost Booster
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
+ #   configuration object for setting additional options.
+ #
+ # @return [Boolean] Returns `true` if the extract operation succeeded.
+ #
+ # @example
+ #   require "google/cloud/bigquery"
+ #
+ #   bigquery = Google::Cloud::Bigquery.new
+ #   dataset = bigquery.dataset "my_dataset"
+ #   model = dataset.model "my_model"
+ #
+ #   model.extract "gs://my-bucket/#{model.model_id}"
+ #
+ # @!group Data
+ #
+ def extract extract_url, format: nil, &block
+   job = extract_job extract_url, format: format, &block
+   job.wait_until_done!
+   ensure_job_succeeded! job
+   true
+ end
+
  ##
  # Permanently deletes the model.
  #
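
The block form combines naturally with the `format` option. A hedged sketch with placeholder dataset and model names, exporting an XGBoost model and pinning the job location via the updater:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"
    model    = dataset.model "my_model"

    extract_job = model.extract_job "gs://my-bucket/#{model.model_id}",
                                    format: "ml_xgboost_booster" do |job|
      job.location = "EU" # set explicitly if the model resource lacks a location
    end

    extract_job.wait_until_done!
    extract_job.done? #=> true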
@@ -734,6 +879,17 @@ module Google
  def ensure_full_data!
    reload! unless resource_full?
  end
+
+ def ensure_job_succeeded! job
+   return unless job.failed?
+   begin
+     # raise to activate ruby exception cause handling
+     raise job.gapi_error
+   rescue StandardError => e
+     # wrap Google::Apis::Error with Google::Cloud::Error
+     raise Google::Cloud::Error.from_error(e)
+   end
+ end
  end
  end
  end
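
The raise-then-rescue shape of `ensure_job_succeeded!` exists so that Ruby's exception `cause` chaining links the wrapping `Google::Cloud::Error` back to the original API error. A standalone sketch of the same pattern, with illustrative error classes standing in for the google-cloud ones:

    # Illustrative stand-ins; not the google-cloud classes.
    class ApiError   < StandardError; end
    class CloudError < StandardError; end

    def wrap_failure(original)
      # Raising and rescuing (rather than constructing the wrapper directly)
      # sets the new error's #cause to the original exception automatically.
      raise original
    rescue StandardError => e
      raise CloudError, e.message
    end

    begin
      wrap_failure ApiError.new("backend error")
    rescue CloudError => e
      puts e.message     #=> "backend error"
      puts e.cause.class #=> ApiError
    end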