@azure/storage-file-share 12.14.0 → 12.20.0-alpha.20230607.3
This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the versions as released.
- package/dist/index.js +5739 -6320
- package/dist/index.js.map +1 -1
- package/dist-esm/storage-blob/src/BatchResponse.js +4 -0
- package/dist-esm/storage-blob/src/BatchResponse.js.map +1 -0
- package/dist-esm/storage-blob/src/BatchResponseParser.js +137 -0
- package/dist-esm/storage-blob/src/BatchResponseParser.js.map +1 -0
- package/dist-esm/storage-blob/src/BatchUtils.browser.js +11 -0
- package/dist-esm/storage-blob/src/BatchUtils.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/BatchUtils.js +15 -0
- package/dist-esm/storage-blob/src/BatchUtils.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobBatch.js +267 -0
- package/dist-esm/storage-blob/src/BlobBatch.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobBatchClient.js +140 -0
- package/dist-esm/storage-blob/src/BlobBatchClient.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobDownloadResponse.browser.js +7 -0
- package/dist-esm/storage-blob/src/BlobDownloadResponse.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobDownloadResponse.js +455 -0
- package/dist-esm/storage-blob/src/BlobDownloadResponse.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobLeaseClient.js +192 -0
- package/dist-esm/storage-blob/src/BlobLeaseClient.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobQueryResponse.browser.js +362 -0
- package/dist-esm/storage-blob/src/BlobQueryResponse.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobQueryResponse.js +367 -0
- package/dist-esm/storage-blob/src/BlobQueryResponse.js.map +1 -0
- package/dist-esm/storage-blob/src/BlobServiceClient.js +702 -0
- package/dist-esm/storage-blob/src/BlobServiceClient.js.map +1 -0
- package/dist-esm/storage-blob/src/Clients.js +2527 -0
- package/dist-esm/storage-blob/src/Clients.js.map +1 -0
- package/dist-esm/storage-blob/src/ContainerClient.js +1161 -0
- package/dist-esm/storage-blob/src/ContainerClient.js.map +1 -0
- package/dist-esm/storage-blob/src/PageBlobRangeResponse.js +24 -0
- package/dist-esm/storage-blob/src/PageBlobRangeResponse.js.map +1 -0
- package/dist-esm/storage-blob/src/Pipeline.js +259 -0
- package/dist-esm/storage-blob/src/Pipeline.js.map +1 -0
- package/dist-esm/storage-blob/src/Range.js +21 -0
- package/dist-esm/storage-blob/src/Range.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/StorageBrowserPolicyFactory.js +1 -1
- package/dist-esm/storage-blob/src/StorageBrowserPolicyFactory.js.map +1 -0
- package/dist-esm/storage-blob/src/StorageClient.js +29 -0
- package/dist-esm/storage-blob/src/StorageClient.js.map +1 -0
- package/dist-esm/storage-blob/src/StorageContextClient.js +17 -0
- package/dist-esm/storage-blob/src/StorageContextClient.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/StorageRetryPolicyFactory.js +2 -1
- package/dist-esm/storage-blob/src/StorageRetryPolicyFactory.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/credentials/AnonymousCredential.js +1 -1
- package/dist-esm/storage-blob/src/credentials/AnonymousCredential.js.map +1 -0
- package/dist-esm/storage-blob/src/credentials/Credential.js.map +1 -0
- package/dist-esm/storage-blob/src/credentials/StorageSharedKeyCredential.browser.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/credentials/StorageSharedKeyCredential.js +1 -1
- package/dist-esm/storage-blob/src/credentials/StorageSharedKeyCredential.js.map +1 -0
- package/dist-esm/storage-blob/src/credentials/UserDelegationKeyCredential.browser.js +5 -0
- package/dist-esm/storage-blob/src/credentials/UserDelegationKeyCredential.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/credentials/UserDelegationKeyCredential.js +31 -0
- package/dist-esm/storage-blob/src/credentials/UserDelegationKeyCredential.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/generated/src/index.js +1 -1
- package/dist-esm/storage-blob/src/generated/src/index.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/models/index.js +256 -0
- package/dist-esm/storage-blob/src/generated/src/models/index.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/models/mappers.js +8196 -0
- package/dist-esm/storage-blob/src/generated/src/models/mappers.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/models/parameters.js +1610 -0
- package/dist-esm/storage-blob/src/generated/src/models/parameters.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/appendBlob.js +221 -0
- package/dist-esm/storage-blob/src/generated/src/operations/appendBlob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/blob.js +997 -0
- package/dist-esm/storage-blob/src/generated/src/operations/blob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/blockBlob.js +365 -0
- package/dist-esm/storage-blob/src/generated/src/operations/blockBlob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/container.js +705 -0
- package/dist-esm/storage-blob/src/generated/src/operations/container.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/index.js +14 -0
- package/dist-esm/storage-blob/src/generated/src/operations/index.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/pageBlob.js +456 -0
- package/dist-esm/storage-blob/src/generated/src/operations/pageBlob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operations/service.js +315 -0
- package/dist-esm/storage-blob/src/generated/src/operations/service.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/appendBlob.js +9 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/appendBlob.js.map +1 -0
- package/dist-esm/{src/generated/src/models/index.js → storage-blob/src/generated/src/operationsInterfaces/blob.js} +1 -1
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/blob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/blockBlob.js +9 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/blockBlob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/container.js +9 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/container.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/index.js +14 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/index.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/pageBlob.js +9 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/pageBlob.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/service.js +9 -0
- package/dist-esm/storage-blob/src/generated/src/operationsInterfaces/service.js.map +1 -0
- package/dist-esm/storage-blob/src/generated/src/storageClient.js +49 -0
- package/dist-esm/storage-blob/src/generated/src/storageClient.js.map +1 -0
- package/dist-esm/storage-blob/src/generatedModels.js +4 -0
- package/dist-esm/storage-blob/src/generatedModels.js.map +1 -0
- package/dist-esm/storage-blob/src/index.browser.js +23 -0
- package/dist-esm/storage-blob/src/index.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/index.js +33 -0
- package/dist-esm/storage-blob/src/index.js.map +1 -0
- package/dist-esm/storage-blob/src/log.js +8 -0
- package/dist-esm/storage-blob/src/log.js.map +1 -0
- package/dist-esm/storage-blob/src/models.js +108 -0
- package/dist-esm/storage-blob/src/models.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/AnonymousCredentialPolicy.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/policies/CredentialPolicy.js +1 -1
- package/dist-esm/storage-blob/src/policies/CredentialPolicy.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/RequestPolicy.js +40 -0
- package/dist-esm/storage-blob/src/policies/RequestPolicy.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/policies/StorageBrowserPolicy.js +2 -1
- package/dist-esm/{src → storage-blob/src}/policies/StorageBrowserPolicy.js.map +1 -1
- package/dist-esm/storage-blob/src/policies/StorageBrowserPolicyV2.js +31 -0
- package/dist-esm/storage-blob/src/policies/StorageBrowserPolicyV2.js.map +1 -0
- package/dist-esm/{src → storage-blob/src}/policies/StorageRetryPolicy.js +15 -6
- package/dist-esm/storage-blob/src/policies/StorageRetryPolicy.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/StorageRetryPolicyV2.js +165 -0
- package/dist-esm/storage-blob/src/policies/StorageRetryPolicyV2.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/StorageSharedKeyCredentialPolicy.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/StorageSharedKeyCredentialPolicyV2.browser.js +18 -0
- package/dist-esm/storage-blob/src/policies/StorageSharedKeyCredentialPolicyV2.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/policies/StorageSharedKeyCredentialPolicyV2.js +131 -0
- package/dist-esm/storage-blob/src/policies/StorageSharedKeyCredentialPolicyV2.js.map +1 -0
- package/dist-esm/storage-blob/src/pollers/BlobStartCopyFromUrlPoller.js +130 -0
- package/dist-esm/storage-blob/src/pollers/BlobStartCopyFromUrlPoller.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/AccountSASPermissions.js +227 -0
- package/dist-esm/storage-blob/src/sas/AccountSASPermissions.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/AccountSASResourceTypes.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/AccountSASServices.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/AccountSASSignatureValues.js +93 -0
- package/dist-esm/storage-blob/src/sas/AccountSASSignatureValues.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/BlobSASPermissions.js +195 -0
- package/dist-esm/storage-blob/src/sas/BlobSASPermissions.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/BlobSASSignatureValues.js +555 -0
- package/dist-esm/storage-blob/src/sas/BlobSASSignatureValues.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/ContainerSASPermissions.js +221 -0
- package/dist-esm/storage-blob/src/sas/ContainerSASPermissions.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/SASQueryParameters.js +234 -0
- package/dist-esm/storage-blob/src/sas/SASQueryParameters.js.map +1 -0
- package/dist-esm/storage-blob/src/sas/SasIPRange.js +13 -0
- package/dist-esm/storage-blob/src/sas/SasIPRange.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/Batch.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/BlobQuickQueryStream.js +111 -0
- package/dist-esm/storage-blob/src/utils/BlobQuickQueryStream.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/Mutex.js +66 -0
- package/dist-esm/storage-blob/src/utils/Mutex.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/RetriableReadableStream.js +119 -0
- package/dist-esm/storage-blob/src/utils/RetriableReadableStream.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/cache.js +11 -0
- package/dist-esm/storage-blob/src/utils/cache.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/constants.js +223 -0
- package/dist-esm/storage-blob/src/utils/constants.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/tracing.js +14 -0
- package/dist-esm/storage-blob/src/utils/tracing.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/utils.browser.js +48 -0
- package/dist-esm/storage-blob/src/utils/utils.browser.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/utils.common.js +754 -0
- package/dist-esm/storage-blob/src/utils/utils.common.js.map +1 -0
- package/dist-esm/storage-blob/src/utils/utils.node.js +132 -0
- package/dist-esm/storage-blob/src/utils/utils.node.js.map +1 -0
- package/dist-esm/storage-file-share/src/AccountSASPermissions.js.map +1 -0
- package/dist-esm/storage-file-share/src/AccountSASResourceTypes.js +72 -0
- package/dist-esm/storage-file-share/src/AccountSASResourceTypes.js.map +1 -0
- package/dist-esm/storage-file-share/src/AccountSASServices.js +80 -0
- package/dist-esm/storage-file-share/src/AccountSASServices.js.map +1 -0
- package/dist-esm/storage-file-share/src/AccountSASSignatureValues.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/Clients.js +476 -1253
- package/dist-esm/storage-file-share/src/Clients.js.map +1 -0
- package/dist-esm/storage-file-share/src/FileDownloadResponse.browser.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/FileDownloadResponse.js +15 -14
- package/dist-esm/storage-file-share/src/FileDownloadResponse.js.map +1 -0
- package/dist-esm/storage-file-share/src/FileSASPermissions.js.map +1 -0
- package/dist-esm/storage-file-share/src/FileSASSignatureValues.js.map +1 -0
- package/dist-esm/storage-file-share/src/FileSystemAttributes.js.map +1 -0
- package/dist-esm/storage-file-share/src/Range.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/SASQueryParameters.js +14 -14
- package/dist-esm/storage-file-share/src/SASQueryParameters.js.map +1 -0
- package/dist-esm/storage-file-share/src/SasIPRange.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/ShareClientInternal.js +1 -2
- package/dist-esm/storage-file-share/src/ShareClientInternal.js.map +1 -0
- package/dist-esm/storage-file-share/src/ShareSASPermissions.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/ShareServiceClient.js +58 -121
- package/dist-esm/storage-file-share/src/ShareServiceClient.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/StorageClient.js +20 -14
- package/dist-esm/storage-file-share/src/StorageClient.js.map +1 -0
- package/dist-esm/storage-file-share/src/StorageContextClient.js +18 -0
- package/dist-esm/storage-file-share/src/StorageContextClient.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/index.js +11 -0
- package/dist-esm/storage-file-share/src/generated/src/index.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/models/index.js +144 -0
- package/dist-esm/storage-file-share/src/generated/src/models/index.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/models/mappers.js +16 -49
- package/dist-esm/storage-file-share/src/generated/src/models/mappers.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/models/parameters.js +17 -37
- package/dist-esm/storage-file-share/src/generated/src/models/parameters.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/operations/directory.js +17 -73
- package/dist-esm/storage-file-share/src/generated/src/operations/directory.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/operations/file.js +33 -142
- package/dist-esm/storage-file-share/src/generated/src/operations/file.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operations/index.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/operations/service.js +7 -17
- package/dist-esm/storage-file-share/src/generated/src/operations/service.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/generated/src/operations/share.js +26 -85
- package/dist-esm/storage-file-share/src/generated/src/operations/share.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/directory.js +9 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/directory.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/file.js +9 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/file.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/index.js +12 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/index.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/service.js +9 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/service.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/share.js +9 -0
- package/dist-esm/storage-file-share/src/generated/src/operationsInterfaces/share.js.map +1 -0
- package/dist-esm/storage-file-share/src/generated/src/storageClient.js +48 -0
- package/dist-esm/storage-file-share/src/generated/src/storageClient.js.map +1 -0
- package/dist-esm/storage-file-share/src/generatedModels.js +4 -0
- package/dist-esm/storage-file-share/src/generatedModels.js.map +1 -0
- package/dist-esm/storage-file-share/src/index.browser.js +18 -0
- package/dist-esm/storage-file-share/src/index.browser.js.map +1 -0
- package/dist-esm/storage-file-share/src/index.js +28 -0
- package/dist-esm/storage-file-share/src/index.js.map +1 -0
- package/dist-esm/storage-file-share/src/log.js.map +1 -0
- package/dist-esm/storage-file-share/src/models.js.map +1 -0
- package/dist-esm/storage-file-share/src/utils/Batch.js +122 -0
- package/dist-esm/storage-file-share/src/utils/Batch.js.map +1 -0
- package/dist-esm/storage-file-share/src/utils/BufferScheduler.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/utils/RetriableReadableStream.js +51 -48
- package/dist-esm/storage-file-share/src/utils/RetriableReadableStream.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/utils/constants.js +2 -6
- package/dist-esm/storage-file-share/src/utils/constants.js.map +1 -0
- package/dist-esm/storage-file-share/src/utils/tracing.js +14 -0
- package/dist-esm/storage-file-share/src/utils/tracing.js.map +1 -0
- package/dist-esm/storage-file-share/src/utils/utils.browser.js.map +1 -0
- package/dist-esm/{src → storage-file-share/src}/utils/utils.common.js +82 -95
- package/dist-esm/storage-file-share/src/utils/utils.common.js.map +1 -0
- package/dist-esm/storage-file-share/src/utils/utils.node.js.map +1 -0
- package/package.json +31 -25
- package/types/3.1/storage-file-share.d.ts +1454 -529
- package/types/latest/storage-file-share.d.ts +1580 -549
- package/dist-esm/src/AccountSASPermissions.js.map +0 -1
- package/dist-esm/src/AccountSASResourceTypes.js.map +0 -1
- package/dist-esm/src/AccountSASServices.js.map +0 -1
- package/dist-esm/src/AccountSASSignatureValues.js.map +0 -1
- package/dist-esm/src/Clients.js.map +0 -1
- package/dist-esm/src/FileDownloadResponse.browser.js.map +0 -1
- package/dist-esm/src/FileDownloadResponse.js.map +0 -1
- package/dist-esm/src/FileSASPermissions.js.map +0 -1
- package/dist-esm/src/FileSASSignatureValues.js.map +0 -1
- package/dist-esm/src/FileSystemAttributes.js.map +0 -1
- package/dist-esm/src/Pipeline.js +0 -88
- package/dist-esm/src/Pipeline.js.map +0 -1
- package/dist-esm/src/Range.js.map +0 -1
- package/dist-esm/src/SASQueryParameters.js.map +0 -1
- package/dist-esm/src/SasIPRange.js.map +0 -1
- package/dist-esm/src/ShareClientInternal.js.map +0 -1
- package/dist-esm/src/ShareSASPermissions.js.map +0 -1
- package/dist-esm/src/ShareServiceClient.js.map +0 -1
- package/dist-esm/src/StorageBrowserPolicyFactory.js.map +0 -1
- package/dist-esm/src/StorageClient.js.map +0 -1
- package/dist-esm/src/StorageRetryPolicyFactory.js.map +0 -1
- package/dist-esm/src/TelemetryPolicyFactory.js +0 -50
- package/dist-esm/src/TelemetryPolicyFactory.js.map +0 -1
- package/dist-esm/src/credentials/AnonymousCredential.js.map +0 -1
- package/dist-esm/src/credentials/Credential.js.map +0 -1
- package/dist-esm/src/credentials/StorageSharedKeyCredential.browser.js.map +0 -1
- package/dist-esm/src/credentials/StorageSharedKeyCredential.js.map +0 -1
- package/dist-esm/src/generated/src/index.js.map +0 -1
- package/dist-esm/src/generated/src/models/index.js.map +0 -1
- package/dist-esm/src/generated/src/models/mappers.js.map +0 -1
- package/dist-esm/src/generated/src/models/parameters.js.map +0 -1
- package/dist-esm/src/generated/src/operations/directory.js.map +0 -1
- package/dist-esm/src/generated/src/operations/file.js.map +0 -1
- package/dist-esm/src/generated/src/operations/index.js.map +0 -1
- package/dist-esm/src/generated/src/operations/service.js.map +0 -1
- package/dist-esm/src/generated/src/operations/share.js.map +0 -1
- package/dist-esm/src/generated/src/storageClient.js +0 -25
- package/dist-esm/src/generated/src/storageClient.js.map +0 -1
- package/dist-esm/src/generated/src/storageClientContext.js +0 -40
- package/dist-esm/src/generated/src/storageClientContext.js.map +0 -1
- package/dist-esm/src/generatedModels.js +0 -8
- package/dist-esm/src/generatedModels.js.map +0 -1
- package/dist-esm/src/index.browser.js +0 -17
- package/dist-esm/src/index.browser.js.map +0 -1
- package/dist-esm/src/index.js +0 -27
- package/dist-esm/src/index.js.map +0 -1
- package/dist-esm/src/log.js.map +0 -1
- package/dist-esm/src/models.js.map +0 -1
- package/dist-esm/src/policies/AnonymousCredentialPolicy.js.map +0 -1
- package/dist-esm/src/policies/CredentialPolicy.js.map +0 -1
- package/dist-esm/src/policies/StorageRetryPolicy.js.map +0 -1
- package/dist-esm/src/policies/StorageSharedKeyCredentialPolicy.js.map +0 -1
- package/dist-esm/src/policies/TelemetryPolicy.js +0 -36
- package/dist-esm/src/policies/TelemetryPolicy.js.map +0 -1
- package/dist-esm/src/utils/Batch.js.map +0 -1
- package/dist-esm/src/utils/BufferScheduler.js.map +0 -1
- package/dist-esm/src/utils/RetriableReadableStream.js.map +0 -1
- package/dist-esm/src/utils/cache.js +0 -8
- package/dist-esm/src/utils/cache.js.map +0 -1
- package/dist-esm/src/utils/constants.js.map +0 -1
- package/dist-esm/src/utils/tracing.js +0 -27
- package/dist-esm/src/utils/tracing.js.map +0 -1
- package/dist-esm/src/utils/utils.browser.js.map +0 -1
- package/dist-esm/src/utils/utils.common.js.map +0 -1
- package/dist-esm/src/utils/utils.node.js.map +0 -1
- /package/dist-esm/{src → storage-blob/src}/credentials/Credential.js +0 -0
- /package/dist-esm/{src → storage-blob/src}/credentials/StorageSharedKeyCredential.browser.js +0 -0
- /package/dist-esm/{src → storage-blob/src}/policies/AnonymousCredentialPolicy.js +0 -0
- /package/dist-esm/{src → storage-blob/src}/policies/StorageSharedKeyCredentialPolicy.js +0 -0
- /package/dist-esm/{src → storage-blob/src/sas}/AccountSASResourceTypes.js +0 -0
- /package/dist-esm/{src → storage-blob/src/sas}/AccountSASServices.js +0 -0
- /package/dist-esm/{src → storage-blob/src}/utils/Batch.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/AccountSASPermissions.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/AccountSASSignatureValues.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/FileDownloadResponse.browser.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/FileSASPermissions.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/FileSASSignatureValues.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/FileSystemAttributes.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/Range.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/SasIPRange.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/ShareSASPermissions.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/generated/src/operations/index.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/log.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/models.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/utils/BufferScheduler.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/utils/utils.browser.js +0 -0
- /package/dist-esm/{src → storage-file-share/src}/utils/utils.node.js +0 -0
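
The moves above consolidate the inlined blob sources under `dist-esm/storage-blob/src` and the file-share sources under `dist-esm/storage-file-share/src`. Consumers import through the package entry point, so application code should not need to change. A minimal usage sketch against the public `@azure/storage-file-share` API follows; the connection-string variable and the share, directory, and file names are illustrative placeholders, not values taken from this diff:

```ts
import { ShareServiceClient } from "@azure/storage-file-share";

// Placeholder names and environment variable, for illustration only.
async function main(): Promise<void> {
  const serviceClient = ShareServiceClient.fromConnectionString(
    process.env.AZURE_STORAGE_CONNECTION_STRING ?? ""
  );
  const fileClient = serviceClient
    .getShareClient("myshare")
    .getDirectoryClient("mydir")
    .getFileClient("myfile.txt");

  // In Node.js the downloaded body is a readable stream; in the browser use blobBody instead.
  const response = await fileClient.download();
  response.readableStreamBody?.pipe(process.stdout);
}

main().catch(console.error);
```

The response shape parallels the `BlobClient.download` behavior visible in the Clients.js hunk below: `readableStreamBody` in Node.js, `blobBody` in browsers.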
package/dist-esm/storage-blob/src/Clients.js
@@ -0,0 +1,2527 @@
|
|
1
|
+
import { __asyncDelegator, __asyncGenerator, __asyncValues, __await } from "tslib";
|
2
|
+
import { getDefaultProxySettings, } from "@azure/core-rest-pipeline";
|
3
|
+
import { isTokenCredential } from "@azure/core-auth";
|
4
|
+
import { isNode } from "@azure/core-util";
|
5
|
+
import { v4 as generateUuid } from "uuid";
|
6
|
+
import { BlobDownloadResponse } from "./BlobDownloadResponse";
|
7
|
+
import { BlobQueryResponse } from "./BlobQueryResponse";
|
8
|
+
import { AnonymousCredential } from "./credentials/AnonymousCredential";
|
9
|
+
import { StorageSharedKeyCredential } from "./credentials/StorageSharedKeyCredential";
|
10
|
+
import { ensureCpkIfSpecified, toAccessTier, } from "./models";
|
11
|
+
import { rangeResponseFromModel, } from "./PageBlobRangeResponse";
|
12
|
+
import { newPipeline, isPipelineLike } from "./Pipeline";
|
13
|
+
import { BlobBeginCopyFromUrlPoller, } from "./pollers/BlobStartCopyFromUrlPoller";
|
14
|
+
import { rangeToString } from "./Range";
|
15
|
+
import { StorageClient } from "./StorageClient";
|
16
|
+
import { Batch } from "./utils/Batch";
|
17
|
+
import { BufferScheduler } from "../../storage-common/src";
|
18
|
+
import { BlobDoesNotUseCustomerSpecifiedEncryption, BlobUsesCustomerSpecifiedEncryptionMsg, BLOCK_BLOB_MAX_BLOCKS, BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES, BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES, DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES, DEFAULT_BLOCK_BUFFER_SIZE_BYTES, DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS, ETagAny, URLConstants, } from "./utils/constants";
|
19
|
+
import { tracingClient } from "./utils/tracing";
|
20
|
+
import { appendToURLPath, appendToURLQuery, assertResponse, extractConnectionStringParts, ExtractPageRangeInfoItems, generateBlockID, getURLParameter, httpAuthorizationToString, isIpEndpointStyle, parseObjectReplicationRecord, setURLParameter, toBlobTags, toBlobTagsString, toQuerySerialization, toTags, } from "./utils/utils.common";
|
21
|
+
import { fsCreateReadStream, fsStat, readStreamToLocalFile, streamToBuffer, } from "./utils/utils.node";
|
22
|
+
import { generateBlobSASQueryParameters } from "./sas/BlobSASSignatureValues";
|
23
|
+
import { BlobLeaseClient } from "./BlobLeaseClient";
|
24
|
+
/**
|
25
|
+
* A BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob,
|
26
|
+
* append blob, or page blob.
|
27
|
+
*/
|
28
|
+
export class BlobClient extends StorageClient {
|
29
|
+
/**
|
30
|
+
* The name of the blob.
|
31
|
+
*/
|
32
|
+
get name() {
|
33
|
+
return this._name;
|
34
|
+
}
|
35
|
+
/**
|
36
|
+
* The name of the storage container the blob is associated with.
|
37
|
+
*/
|
38
|
+
get containerName() {
|
39
|
+
return this._containerName;
|
40
|
+
}
|
41
|
+
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
|
42
|
+
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
|
43
|
+
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
|
44
|
+
options) {
|
45
|
+
options = options || {};
|
46
|
+
let pipeline;
|
47
|
+
let url;
|
48
|
+
if (isPipelineLike(credentialOrPipelineOrContainerName)) {
|
49
|
+
// (url: string, pipeline: Pipeline)
|
50
|
+
url = urlOrConnectionString;
|
51
|
+
pipeline = credentialOrPipelineOrContainerName;
|
52
|
+
}
|
53
|
+
else if ((isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) ||
|
54
|
+
credentialOrPipelineOrContainerName instanceof AnonymousCredential ||
|
55
|
+
isTokenCredential(credentialOrPipelineOrContainerName)) {
|
56
|
+
// (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
|
57
|
+
url = urlOrConnectionString;
|
58
|
+
options = blobNameOrOptions;
|
59
|
+
pipeline = newPipeline(credentialOrPipelineOrContainerName, options);
|
60
|
+
}
|
61
|
+
else if (!credentialOrPipelineOrContainerName &&
|
62
|
+
typeof credentialOrPipelineOrContainerName !== "string") {
|
63
|
+
// (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
|
64
|
+
// The second parameter is undefined. Use anonymous credential.
|
65
|
+
url = urlOrConnectionString;
|
66
|
+
pipeline = newPipeline(new AnonymousCredential(), options);
|
67
|
+
}
|
68
|
+
else if (credentialOrPipelineOrContainerName &&
|
69
|
+
typeof credentialOrPipelineOrContainerName === "string" &&
|
70
|
+
blobNameOrOptions &&
|
71
|
+
typeof blobNameOrOptions === "string") {
|
72
|
+
// (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions)
|
73
|
+
const containerName = credentialOrPipelineOrContainerName;
|
74
|
+
const blobName = blobNameOrOptions;
|
75
|
+
const extractedCreds = extractConnectionStringParts(urlOrConnectionString);
|
76
|
+
if (extractedCreds.kind === "AccountConnString") {
|
77
|
+
if (isNode) {
|
78
|
+
const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey);
|
79
|
+
url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName));
|
80
|
+
if (!options.proxyOptions) {
|
81
|
+
options.proxyOptions = getDefaultProxySettings(extractedCreds.proxyUri);
|
82
|
+
}
|
83
|
+
pipeline = newPipeline(sharedKeyCredential, options);
|
84
|
+
}
|
85
|
+
else {
|
86
|
+
throw new Error("Account connection string is only supported in Node.js environment");
|
87
|
+
}
|
88
|
+
}
|
89
|
+
else if (extractedCreds.kind === "SASConnString") {
|
90
|
+
url =
|
91
|
+
appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) +
|
92
|
+
"?" +
|
93
|
+
extractedCreds.accountSas;
|
94
|
+
pipeline = newPipeline(new AnonymousCredential(), options);
|
95
|
+
}
|
96
|
+
else {
|
97
|
+
throw new Error("Connection string must be either an Account connection string or a SAS connection string");
|
98
|
+
}
|
99
|
+
}
|
100
|
+
else {
|
101
|
+
throw new Error("Expecting non-empty strings for containerName and blobName parameters");
|
102
|
+
}
|
103
|
+
super(url, pipeline);
|
104
|
+
({ blobName: this._name, containerName: this._containerName } =
|
105
|
+
this.getBlobAndContainerNamesFromUrl());
|
106
|
+
this.blobContext = this.storageClientContext.blob;
|
107
|
+
this._snapshot = getURLParameter(this.url, URLConstants.Parameters.SNAPSHOT);
|
108
|
+
this._versionId = getURLParameter(this.url, URLConstants.Parameters.VERSIONID);
|
109
|
+
}
|
110
|
+
/**
|
111
|
+
* Creates a new BlobClient object identical to the source but with the specified snapshot timestamp.
|
112
|
+
* Provide "" will remove the snapshot and return a Client to the base blob.
|
113
|
+
*
|
114
|
+
* @param snapshot - The snapshot timestamp.
|
115
|
+
* @returns A new BlobClient object identical to the source but with the specified snapshot timestamp
|
116
|
+
*/
|
117
|
+
withSnapshot(snapshot) {
|
118
|
+
return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline);
|
119
|
+
}
|
120
|
+
/**
|
121
|
+
* Creates a new BlobClient object pointing to a version of this blob.
|
122
|
+
* Provide "" will remove the versionId and return a Client to the base blob.
|
123
|
+
*
|
124
|
+
* @param versionId - The versionId.
|
125
|
+
* @returns A new BlobClient object pointing to the version of this blob.
|
126
|
+
*/
|
127
|
+
withVersion(versionId) {
|
128
|
+
return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.VERSIONID, versionId.length === 0 ? undefined : versionId), this.pipeline);
|
129
|
+
}
|
130
|
+
/**
|
131
|
+
* Creates a AppendBlobClient object.
|
132
|
+
*
|
133
|
+
*/
|
134
|
+
getAppendBlobClient() {
|
135
|
+
return new AppendBlobClient(this.url, this.pipeline);
|
136
|
+
}
|
137
|
+
/**
|
138
|
+
* Creates a BlockBlobClient object.
|
139
|
+
*
|
140
|
+
*/
|
141
|
+
getBlockBlobClient() {
|
142
|
+
return new BlockBlobClient(this.url, this.pipeline);
|
143
|
+
}
|
144
|
+
/**
|
145
|
+
* Creates a PageBlobClient object.
|
146
|
+
*
|
147
|
+
*/
|
148
|
+
getPageBlobClient() {
|
149
|
+
return new PageBlobClient(this.url, this.pipeline);
|
150
|
+
}
|
151
|
+
/**
|
152
|
+
* Reads or downloads a blob from the system, including its metadata and properties.
|
153
|
+
* You can also call Get Blob to read a snapshot.
|
154
|
+
*
|
155
|
+
* * In Node.js, data returns in a Readable stream readableStreamBody
|
156
|
+
* * In browsers, data returns in a promise blobBody
|
157
|
+
*
|
158
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob
|
159
|
+
*
|
160
|
+
* @param offset - From which position of the blob to download, greater than or equal to 0
|
161
|
+
* @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined
|
162
|
+
* @param options - Optional options to Blob Download operation.
|
163
|
+
*
|
164
|
+
*
|
165
|
+
* Example usage (Node.js):
|
166
|
+
*
|
167
|
+
* ```js
|
168
|
+
* // Download and convert a blob to a string
|
169
|
+
* const downloadBlockBlobResponse = await blobClient.download();
|
170
|
+
* const downloaded = await streamToBuffer(downloadBlockBlobResponse.readableStreamBody);
|
171
|
+
* console.log("Downloaded blob content:", downloaded.toString());
|
172
|
+
*
|
173
|
+
* async function streamToBuffer(readableStream) {
|
174
|
+
* return new Promise((resolve, reject) => {
|
175
|
+
* const chunks = [];
|
176
|
+
* readableStream.on("data", (data) => {
|
177
|
+
* chunks.push(data instanceof Buffer ? data : Buffer.from(data));
|
178
|
+
* });
|
179
|
+
* readableStream.on("end", () => {
|
180
|
+
* resolve(Buffer.concat(chunks));
|
181
|
+
* });
|
182
|
+
* readableStream.on("error", reject);
|
183
|
+
* });
|
184
|
+
* }
|
185
|
+
* ```
|
186
|
+
*
|
187
|
+
* Example usage (browser):
|
188
|
+
*
|
189
|
+
* ```js
|
190
|
+
* // Download and convert a blob to a string
|
191
|
+
* const downloadBlockBlobResponse = await blobClient.download();
|
192
|
+
* const downloaded = await blobToString(await downloadBlockBlobResponse.blobBody);
|
193
|
+
* console.log(
|
194
|
+
* "Downloaded blob content",
|
195
|
+
* downloaded
|
196
|
+
* );
|
197
|
+
*
|
198
|
+
* async function blobToString(blob: Blob): Promise<string> {
|
199
|
+
* const fileReader = new FileReader();
|
200
|
+
* return new Promise<string>((resolve, reject) => {
|
201
|
+
* fileReader.onloadend = (ev: any) => {
|
202
|
+
* resolve(ev.target!.result);
|
203
|
+
* };
|
204
|
+
* fileReader.onerror = reject;
|
205
|
+
* fileReader.readAsText(blob);
|
206
|
+
* });
|
207
|
+
* }
|
208
|
+
* ```
|
209
|
+
*/
|
210
|
+
async download(offset = 0, count, options = {}) {
|
211
|
+
options.conditions = options.conditions || {};
|
212
|
+
options.conditions = options.conditions || {};
|
213
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
214
|
+
return tracingClient.withSpan("BlobClient-download", options, async (updatedOptions) => {
|
215
|
+
var _a;
|
216
|
+
const res = assertResponse(await this.blobContext.download({
|
217
|
+
abortSignal: options.abortSignal,
|
218
|
+
leaseAccessConditions: options.conditions,
|
219
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
220
|
+
requestOptions: {
|
221
|
+
onDownloadProgress: isNode ? undefined : options.onProgress, // for Node.js, progress is reported by RetriableReadableStream
|
222
|
+
},
|
223
|
+
range: offset === 0 && !count ? undefined : rangeToString({ offset, count }),
|
224
|
+
rangeGetContentMD5: options.rangeGetContentMD5,
|
225
|
+
rangeGetContentCRC64: options.rangeGetContentCrc64,
|
226
|
+
snapshot: options.snapshot,
|
227
|
+
cpkInfo: options.customerProvidedKey,
|
228
|
+
tracingOptions: updatedOptions.tracingOptions,
|
229
|
+
}));
|
230
|
+
const wrappedRes = Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) });
|
231
|
+
// Return browser response immediately
|
232
|
+
if (!isNode) {
|
233
|
+
return wrappedRes;
|
234
|
+
}
|
235
|
+
// We support retrying when download stream unexpected ends in Node.js runtime
|
236
|
+
// Following code shouldn't be bundled into browser build, however some
|
237
|
+
// bundlers may try to bundle following code and "FileReadResponse.ts".
|
238
|
+
// In this case, "FileDownloadResponse.browser.ts" will be used as a shim of "FileDownloadResponse.ts"
|
239
|
+
// The config is in package.json "browser" field
|
240
|
+
if (options.maxRetryRequests === undefined || options.maxRetryRequests < 0) {
|
241
|
+
// TODO: Default value or make it a required parameter?
|
242
|
+
options.maxRetryRequests = DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS;
|
243
|
+
}
|
244
|
+
if (res.contentLength === undefined) {
|
245
|
+
throw new RangeError(`File download response doesn't contain valid content length header`);
|
246
|
+
}
|
247
|
+
if (!res.etag) {
|
248
|
+
throw new RangeError(`File download response doesn't contain valid etag header`);
|
249
|
+
}
|
250
|
+
return new BlobDownloadResponse(wrappedRes, async (start) => {
|
251
|
+
var _a;
|
252
|
+
const updatedDownloadOptions = {
|
253
|
+
leaseAccessConditions: options.conditions,
|
254
|
+
modifiedAccessConditions: {
|
255
|
+
ifMatch: options.conditions.ifMatch || res.etag,
|
256
|
+
ifModifiedSince: options.conditions.ifModifiedSince,
|
257
|
+
ifNoneMatch: options.conditions.ifNoneMatch,
|
258
|
+
ifUnmodifiedSince: options.conditions.ifUnmodifiedSince,
|
259
|
+
ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions,
|
260
|
+
},
|
261
|
+
range: rangeToString({
|
262
|
+
count: offset + res.contentLength - start,
|
263
|
+
offset: start,
|
264
|
+
}),
|
265
|
+
rangeGetContentMD5: options.rangeGetContentMD5,
|
266
|
+
rangeGetContentCRC64: options.rangeGetContentCrc64,
|
267
|
+
snapshot: options.snapshot,
|
268
|
+
cpkInfo: options.customerProvidedKey,
|
269
|
+
};
|
270
|
+
// Debug purpose only
|
271
|
+
// console.log(
|
272
|
+
// `Read from internal stream, range: ${
|
273
|
+
// updatedOptions.range
|
274
|
+
// }, options: ${JSON.stringify(updatedOptions)}`
|
275
|
+
// );
|
276
|
+
return (await this.blobContext.download(Object.assign({ abortSignal: options.abortSignal }, updatedDownloadOptions))).readableStreamBody;
|
277
|
+
}, offset, res.contentLength, {
|
278
|
+
maxRetryRequests: options.maxRetryRequests,
|
279
|
+
onProgress: options.onProgress,
|
280
|
+
});
|
281
|
+
});
|
282
|
+
}
|
283
|
+
/**
|
284
|
+
* Returns true if the Azure blob resource represented by this client exists; false otherwise.
|
285
|
+
*
|
286
|
+
* NOTE: use this function with care since an existing blob might be deleted by other clients or
|
287
|
+
* applications. Vice versa new blobs might be added by other clients or applications after this
|
288
|
+
* function completes.
|
289
|
+
*
|
290
|
+
* @param options - options to Exists operation.
|
291
|
+
*/
|
292
|
+
async exists(options = {}) {
|
293
|
+
return tracingClient.withSpan("BlobClient-exists", options, async (updatedOptions) => {
|
294
|
+
try {
|
295
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
296
|
+
await this.getProperties({
|
297
|
+
abortSignal: options.abortSignal,
|
298
|
+
customerProvidedKey: options.customerProvidedKey,
|
299
|
+
conditions: options.conditions,
|
300
|
+
tracingOptions: updatedOptions.tracingOptions,
|
301
|
+
});
|
302
|
+
return true;
|
303
|
+
}
|
304
|
+
catch (e) {
|
305
|
+
if (e.statusCode === 404) {
|
306
|
+
// Expected exception when checking blob existence
|
307
|
+
return false;
|
308
|
+
}
|
309
|
+
else if (e.statusCode === 409 &&
|
310
|
+
(e.details.errorCode === BlobUsesCustomerSpecifiedEncryptionMsg ||
|
311
|
+
e.details.errorCode === BlobDoesNotUseCustomerSpecifiedEncryption)) {
|
312
|
+
// Expected exception when checking blob existence
|
313
|
+
return true;
|
314
|
+
}
|
315
|
+
throw e;
|
316
|
+
}
|
317
|
+
});
|
318
|
+
}
|
319
|
+
/**
|
320
|
+
* Returns all user-defined metadata, standard HTTP properties, and system properties
|
321
|
+
* for the blob. It does not return the content of the blob.
|
322
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
|
323
|
+
*
|
324
|
+
* WARNING: The `metadata` object returned in the response will have its keys in lowercase, even if
|
325
|
+
* they originally contained uppercase characters. This differs from the metadata keys returned by
|
326
|
+
* the methods of {@link ContainerClient} that list blobs using the `includeMetadata` option, which
|
327
|
+
* will retain their original casing.
|
328
|
+
*
|
329
|
+
* @param options - Optional options to Get Properties operation.
|
330
|
+
*/
|
331
|
+
async getProperties(options = {}) {
|
332
|
+
options.conditions = options.conditions || {};
|
333
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
334
|
+
return tracingClient.withSpan("BlobClient-getProperties", options, async (updatedOptions) => {
|
335
|
+
var _a;
|
336
|
+
const res = assertResponse(await this.blobContext.getProperties({
|
337
|
+
abortSignal: options.abortSignal,
|
338
|
+
leaseAccessConditions: options.conditions,
|
339
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
340
|
+
cpkInfo: options.customerProvidedKey,
|
341
|
+
tracingOptions: updatedOptions.tracingOptions,
|
342
|
+
}));
|
343
|
+
return Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) });
|
344
|
+
});
|
345
|
+
}
|
346
|
+
/**
|
347
|
+
* Marks the specified blob or snapshot for deletion. The blob is later deleted
|
348
|
+
* during garbage collection. Note that in order to delete a blob, you must delete
|
349
|
+
* all of its snapshots. You can delete both at the same time with the Delete
|
350
|
+
* Blob operation.
|
351
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
|
352
|
+
*
|
353
|
+
* @param options - Optional options to Blob Delete operation.
|
354
|
+
*/
|
355
|
+
async delete(options = {}) {
|
356
|
+
options.conditions = options.conditions || {};
|
357
|
+
return tracingClient.withSpan("BlobClient-delete", options, async (updatedOptions) => {
|
358
|
+
var _a;
|
359
|
+
return assertResponse(await this.blobContext.delete({
|
360
|
+
abortSignal: options.abortSignal,
|
361
|
+
deleteSnapshots: options.deleteSnapshots,
|
362
|
+
leaseAccessConditions: options.conditions,
|
363
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
364
|
+
tracingOptions: updatedOptions.tracingOptions,
|
365
|
+
}));
|
366
|
+
});
|
367
|
+
}
|
368
|
+
/**
|
369
|
+
* Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted
|
370
|
+
* during garbage collection. Note that in order to delete a blob, you must delete
|
371
|
+
* all of its snapshots. You can delete both at the same time with the Delete
|
372
|
+
* Blob operation.
|
373
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
|
374
|
+
*
|
375
|
+
* @param options - Optional options to Blob Delete operation.
|
376
|
+
*/
|
377
|
+
async deleteIfExists(options = {}) {
|
378
|
+
return tracingClient.withSpan("BlobClient-deleteIfExists", options, async (updatedOptions) => {
|
379
|
+
var _a, _b;
|
380
|
+
try {
|
381
|
+
const res = assertResponse(await this.delete(updatedOptions));
|
382
|
+
return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response });
|
383
|
+
}
|
384
|
+
catch (e) {
|
385
|
+
if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobNotFound") {
|
386
|
+
return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response });
|
387
|
+
}
|
388
|
+
throw e;
|
389
|
+
}
|
390
|
+
});
|
391
|
+
}
|
392
|
+
/**
|
393
|
+
* Restores the contents and metadata of soft deleted blob and any associated
|
394
|
+
* soft deleted snapshots. Undelete Blob is supported only on version 2017-07-29
|
395
|
+
* or later.
|
396
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob
|
397
|
+
*
|
398
|
+
* @param options - Optional options to Blob Undelete operation.
|
399
|
+
*/
|
400
|
+
async undelete(options = {}) {
|
401
|
+
return tracingClient.withSpan("BlobClient-undelete", options, async (updatedOptions) => {
|
402
|
+
return assertResponse(await this.blobContext.undelete({
|
403
|
+
abortSignal: options.abortSignal,
|
404
|
+
tracingOptions: updatedOptions.tracingOptions,
|
405
|
+
}));
|
406
|
+
});
|
407
|
+
}
|
408
|
+
/**
|
409
|
+
* Sets system properties on the blob.
|
410
|
+
*
|
411
|
+
* If no value provided, or no value provided for the specified blob HTTP headers,
|
412
|
+
* these blob HTTP headers without a value will be cleared.
|
413
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties
|
414
|
+
*
|
415
|
+
* @param blobHTTPHeaders - If no value provided, or no value provided for
|
416
|
+
* the specified blob HTTP headers, these blob HTTP
|
417
|
+
* headers without a value will be cleared.
|
418
|
+
* A common header to set is `blobContentType`
|
419
|
+
* enabling the browser to provide functionality
|
420
|
+
* based on file type.
|
421
|
+
* @param options - Optional options to Blob Set HTTP Headers operation.
|
422
|
+
*/
|
423
|
+
async setHTTPHeaders(blobHTTPHeaders, options = {}) {
|
424
|
+
options.conditions = options.conditions || {};
|
425
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
426
|
+
return tracingClient.withSpan("BlobClient-setHTTPHeaders", options, async (updatedOptions) => {
|
427
|
+
var _a;
|
428
|
+
return assertResponse(await this.blobContext.setHttpHeaders({
|
429
|
+
abortSignal: options.abortSignal,
|
430
|
+
blobHttpHeaders: blobHTTPHeaders,
|
431
|
+
leaseAccessConditions: options.conditions,
|
432
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
433
|
+
// cpkInfo: options.customerProvidedKey, // CPK is not included in Swagger, should change this back when this issue is fixed in Swagger.
|
434
|
+
tracingOptions: updatedOptions.tracingOptions,
|
435
|
+
}));
|
436
|
+
});
|
437
|
+
}
|
438
|
+
/**
|
439
|
+
* Sets user-defined metadata for the specified blob as one or more name-value pairs.
|
440
|
+
*
|
441
|
+
* If no option provided, or no metadata defined in the parameter, the blob
|
442
|
+
* metadata will be removed.
|
443
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata
|
444
|
+
*
|
445
|
+
* @param metadata - Replace existing metadata with this value.
|
446
|
+
* If no value provided the existing metadata will be removed.
|
447
|
+
* @param options - Optional options to Set Metadata operation.
|
448
|
+
*/
|
449
|
+
async setMetadata(metadata, options = {}) {
|
450
|
+
options.conditions = options.conditions || {};
|
451
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
452
|
+
return tracingClient.withSpan("BlobClient-setMetadata", options, async (updatedOptions) => {
|
453
|
+
var _a;
|
454
|
+
return assertResponse(await this.blobContext.setMetadata({
|
455
|
+
abortSignal: options.abortSignal,
|
456
|
+
leaseAccessConditions: options.conditions,
|
457
|
+
metadata,
|
458
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
459
|
+
cpkInfo: options.customerProvidedKey,
|
460
|
+
encryptionScope: options.encryptionScope,
|
461
|
+
tracingOptions: updatedOptions.tracingOptions,
|
462
|
+
}));
|
463
|
+
});
|
464
|
+
}
|
465
|
+
/**
|
466
|
+
* Sets tags on the underlying blob.
|
467
|
+
* A blob can have up to 10 tags. Tag keys must be between 1 and 128 characters. Tag values must be between 0 and 256 characters.
|
468
|
+
* Valid tag key and value characters include lower and upper case letters, digits (0-9),
|
469
|
+
* space (' '), plus ('+'), minus ('-'), period ('.'), foward slash ('/'), colon (':'), equals ('='), and underscore ('_').
|
470
|
+
*
|
471
|
+
* @param tags -
|
472
|
+
* @param options -
|
473
|
+
*/
|
474
|
+
async setTags(tags, options = {}) {
|
475
|
+
return tracingClient.withSpan("BlobClient-setTags", options, async (updatedOptions) => {
|
476
|
+
var _a;
|
477
|
+
return assertResponse(await this.blobContext.setTags({
|
478
|
+
abortSignal: options.abortSignal,
|
479
|
+
leaseAccessConditions: options.conditions,
|
480
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
481
|
+
tracingOptions: updatedOptions.tracingOptions,
|
482
|
+
tags: toBlobTags(tags),
|
483
|
+
}));
|
484
|
+
});
|
485
|
+
}
|
486
|
+
/**
|
487
|
+
* Gets the tags associated with the underlying blob.
|
488
|
+
*
|
489
|
+
* @param options -
|
490
|
+
*/
|
491
|
+
async getTags(options = {}) {
|
492
|
+
return tracingClient.withSpan("BlobClient-getTags", options, async (updatedOptions) => {
|
493
|
+
var _a;
|
494
|
+
const response = assertResponse(await this.blobContext.getTags({
|
495
|
+
abortSignal: options.abortSignal,
|
496
|
+
leaseAccessConditions: options.conditions,
|
497
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
498
|
+
tracingOptions: updatedOptions.tracingOptions,
|
499
|
+
}));
|
500
|
+
const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, tags: toTags({ blobTagSet: response.blobTagSet }) || {} });
|
501
|
+
return wrappedResponse;
|
502
|
+
});
|
503
|
+
}
|
504
|
+
/**
|
505
|
+
* Get a {@link BlobLeaseClient} that manages leases on the blob.
|
506
|
+
*
|
507
|
+
* @param proposeLeaseId - Initial proposed lease Id.
|
508
|
+
* @returns A new BlobLeaseClient object for managing leases on the blob.
|
509
|
+
*/
|
510
|
+
getBlobLeaseClient(proposeLeaseId) {
|
511
|
+
return new BlobLeaseClient(this, proposeLeaseId);
|
512
|
+
}
|
513
|
+
/**
|
514
|
+
* Creates a read-only snapshot of a blob.
|
515
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob
|
516
|
+
*
|
517
|
+
* @param options - Optional options to the Blob Create Snapshot operation.
|
518
|
+
*/
|
519
|
+
async createSnapshot(options = {}) {
|
520
|
+
options.conditions = options.conditions || {};
|
521
|
+
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
|
522
|
+
return tracingClient.withSpan("BlobClient-createSnapshot", options, async (updatedOptions) => {
|
523
|
+
var _a;
|
524
|
+
return assertResponse(await this.blobContext.createSnapshot({
|
525
|
+
abortSignal: options.abortSignal,
|
526
|
+
leaseAccessConditions: options.conditions,
|
527
|
+
metadata: options.metadata,
|
528
|
+
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
|
529
|
+
cpkInfo: options.customerProvidedKey,
|
530
|
+
encryptionScope: options.encryptionScope,
|
531
|
+
tracingOptions: updatedOptions.tracingOptions,
|
532
|
+
}));
|
533
|
+
});
|
534
|
+
}
|
535
|
+
/**
|
536
|
+
* Asynchronously copies a blob to a destination within the storage account.
|
537
|
+
* This method returns a long running operation poller that allows you to wait
|
538
|
+
* indefinitely until the copy is completed.
|
539
|
+
* You can also cancel a copy before it is completed by calling `cancelOperation` on the poller.
|
540
|
+
* Note that the onProgress callback will not be invoked if the operation completes in the first
|
541
|
+
* request, and attempting to cancel a completed copy will result in an error being thrown.
|
542
|
+
*
|
543
|
+
* In version 2012-02-12 and later, the source for a Copy Blob operation can be
|
544
|
+
* a committed blob in any Azure storage account.
|
545
|
+
* Beginning with version 2015-02-21, the source for a Copy Blob operation can be
|
546
|
+
* an Azure file in any Azure storage account.
|
547
|
+
* Only storage accounts created on or after June 7th, 2012 allow the Copy Blob
|
548
|
+
* operation to copy from another storage account.
|
549
|
+
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob
|
550
|
+
*
|
551
|
+
* Example using automatic polling:
|
552
|
+
*
|
553
|
+
* ```js
|
554
|
+
* const copyPoller = await blobClient.beginCopyFromURL('url');
|
555
|
+
* const result = await copyPoller.pollUntilDone();
|
556
|
+
* ```
|
557
|
+
*
|
558
|
+
* Example using manual polling:
|
559
|
+
*
|
560
|
+
* ```js
|
561
|
+
* const copyPoller = await blobClient.beginCopyFromURL('url');
|
562
|
+
* while (!poller.isDone()) {
|
563
|
+
     *   await poller.poll();
     * }
     * const result = copyPoller.getResult();
     * ```
     *
     * Example using progress updates:
     *
     * ```js
     * const copyPoller = await blobClient.beginCopyFromURL('url', {
     *   onProgress(state) {
     *     console.log(`Progress: ${state.copyProgress}`);
     *   }
     * });
     * const result = await copyPoller.pollUntilDone();
     * ```
     *
     * Example using a changing polling interval (default 15 seconds):
     *
     * ```js
     * const copyPoller = await blobClient.beginCopyFromURL('url', {
     *   intervalInMs: 1000 // poll blob every 1 second for copy progress
     * });
     * const result = await copyPoller.pollUntilDone();
     * ```
     *
     * Example using copy cancellation:
     *
     * ```js
     * const copyPoller = await blobClient.beginCopyFromURL('url');
     * // cancel operation after starting it.
     * try {
     *   await copyPoller.cancelOperation();
     *   // calls to get the result now throw PollerCancelledError
     *   await copyPoller.getResult();
     * } catch (err) {
     *   if (err.name === 'PollerCancelledError') {
     *     console.log('The copy was cancelled.');
     *   }
     * }
     * ```
     *
     * @param copySource - url to the source Azure Blob/File.
     * @param options - Optional options to the Blob Start Copy From URL operation.
     */
    async beginCopyFromURL(copySource, options = {}) {
        const client = {
            abortCopyFromURL: (...args) => this.abortCopyFromURL(...args),
            getProperties: (...args) => this.getProperties(...args),
            startCopyFromURL: (...args) => this.startCopyFromURL(...args),
        };
        const poller = new BlobBeginCopyFromUrlPoller({
            blobClient: client,
            copySource,
            intervalInMs: options.intervalInMs,
            onProgress: options.onProgress,
            resumeFrom: options.resumeFrom,
            startCopyFromURLOptions: options,
        });
        // Trigger the startCopyFromURL call by calling poll.
        // Any errors from this method should be surfaced to the user.
        await poller.poll();
        return poller;
    }
    /**
     * Aborts a pending asynchronous Copy Blob operation, and leaves a destination blob with zero
     * length and full metadata. Version 2012-02-12 and newer.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob
     *
     * @param copyId - Id of the Copy From URL operation.
     * @param options - Optional options to the Blob Abort Copy From URL operation.
     */
    async abortCopyFromURL(copyId, options = {}) {
        return tracingClient.withSpan("BlobClient-abortCopyFromURL", options, async (updatedOptions) => {
            return assertResponse(await this.blobContext.abortCopyFromURL(copyId, {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * The synchronous Copy From URL operation copies a blob or an internet resource to a new blob. It will not
     * return a response until the copy is complete.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url
     *
     * @param copySource - The source URL to copy from. A Shared Access Signature (SAS) may be needed for authentication.
     * @param options -
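     *
     * Example usage (a minimal sketch; `blobClient` and the SAS-authenticated source URL are assumed placeholders):
     *
     * ```js
     * const response = await blobClient.syncCopyFromURL(
     *   "https://myaccount.blob.core.windows.net/mycontainer/source.txt?<SAS>"
     * );
     * console.log(`Copy finished with status: ${response.copyStatus}`);
     * ```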
     */
    async syncCopyFromURL(copySource, options = {}) {
        options.conditions = options.conditions || {};
        options.sourceConditions = options.sourceConditions || {};
        return tracingClient.withSpan("BlobClient-syncCopyFromURL", options, async (updatedOptions) => {
            var _a, _b, _c, _d, _e, _f, _g;
            return assertResponse(await this.blobContext.copyFromURL(copySource, {
                abortSignal: options.abortSignal,
                metadata: options.metadata,
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                sourceModifiedAccessConditions: {
                    sourceIfMatch: (_b = options.sourceConditions) === null || _b === void 0 ? void 0 : _b.ifMatch,
                    sourceIfModifiedSince: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifModifiedSince,
                    sourceIfNoneMatch: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch,
                    sourceIfUnmodifiedSince: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.ifUnmodifiedSince,
                },
                sourceContentMD5: options.sourceContentMD5,
                copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization),
                tier: toAccessTier(options.tier),
                blobTagsString: toBlobTagsString(options.tags),
                immutabilityPolicyExpiry: (_f = options.immutabilityPolicy) === null || _f === void 0 ? void 0 : _f.expiriesOn,
                immutabilityPolicyMode: (_g = options.immutabilityPolicy) === null || _g === void 0 ? void 0 : _g.policyMode,
                legalHold: options.legalHold,
                encryptionScope: options.encryptionScope,
                copySourceTags: options.copySourceTags,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium
     * storage account and on a block blob in a blob storage account (locally redundant
     * storage only). A premium page blob's tier determines the allowed size, IOPS,
     * and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
     * storage type. This operation does not update the blob's ETag.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier
     *
     * @param tier - The tier to be set on the blob. Valid values are Hot, Cool, or Archive.
     * @param options - Optional options to the Blob Set Tier operation.
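     *
     * Example usage (a minimal sketch; `blobClient` is an assumed existing blob client):
     *
     * ```js
     * // Move a block blob to the Archive tier, then rehydrate it back to Hot with high priority.
     * await blobClient.setAccessTier("Archive");
     * await blobClient.setAccessTier("Hot", { rehydratePriority: "High" });
     * ```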
     */
    async setAccessTier(tier, options = {}) {
        return tracingClient.withSpan("BlobClient-setAccessTier", options, async (updatedOptions) => {
            var _a;
            return assertResponse(await this.blobContext.setTier(toAccessTier(tier), {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                rehydratePriority: options.rehydratePriority,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
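    /**
     * ONLY AVAILABLE IN NODE.JS RUNTIME.
     *
     * Downloads an Azure Blob in parallel to a buffer. Offset and count are optional;
     * the entire blob is downloaded if they are not provided.
     * (Summary and example added for readability; the overloaded parameters are handled below.)
     *
     * Example usage (a minimal sketch; `blobClient` is an assumed existing BlobClient):
     *
     * ```js
     * // Download the whole blob into a newly allocated Buffer, 4 MB per block, 5 blocks in parallel.
     * const buffer = await blobClient.downloadToBuffer(0, undefined, {
     *   blockSize: 4 * 1024 * 1024,
     *   concurrency: 5,
     * });
     * console.log(buffer.toString());
     * ```
     */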
    async downloadToBuffer(param1, param2, param3, param4 = {}) {
        var _a;
        let buffer;
        let offset = 0;
        let count = 0;
        let options = param4;
        if (param1 instanceof Buffer) {
            buffer = param1;
            offset = param2 || 0;
            count = typeof param3 === "number" ? param3 : 0;
        }
        else {
            offset = typeof param1 === "number" ? param1 : 0;
            count = typeof param2 === "number" ? param2 : 0;
            options = param3 || {};
        }
        let blockSize = (_a = options.blockSize) !== null && _a !== void 0 ? _a : 0;
        if (blockSize < 0) {
            throw new RangeError("blockSize option must be >= 0");
        }
        if (blockSize === 0) {
            blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES;
        }
        if (offset < 0) {
            throw new RangeError("offset option must be >= 0");
        }
        if (count && count <= 0) {
            throw new RangeError("count option must be greater than 0");
        }
        if (!options.conditions) {
            options.conditions = {};
        }
        return tracingClient.withSpan("BlobClient-downloadToBuffer", options, async (updatedOptions) => {
            // Customer doesn't specify length, get it
            if (!count) {
                const response = await this.getProperties(Object.assign(Object.assign({}, options), { tracingOptions: updatedOptions.tracingOptions }));
                count = response.contentLength - offset;
                if (count < 0) {
                    throw new RangeError(`offset ${offset} shouldn't be larger than blob size ${response.contentLength}`);
                }
            }
            // Allocate the buffer of size = count if the buffer is not provided
            if (!buffer) {
                try {
                    buffer = Buffer.alloc(count);
                }
                catch (error) {
                    throw new Error(`Unable to allocate the buffer of size: ${count}(in bytes). Please try passing your own buffer to the "downloadToBuffer" method or try using other methods like "download" or "downloadToFile".\t ${error.message}`);
                }
            }
            if (buffer.length < count) {
                throw new RangeError(`The buffer's size should be equal to or larger than the request count of bytes: ${count}`);
            }
            let transferProgress = 0;
            const batch = new Batch(options.concurrency);
            for (let off = offset; off < offset + count; off = off + blockSize) {
                batch.addOperation(async () => {
                    // Exclusive chunk end position
                    let chunkEnd = offset + count;
                    if (off + blockSize < chunkEnd) {
                        chunkEnd = off + blockSize;
                    }
                    const response = await this.download(off, chunkEnd - off, {
                        abortSignal: options.abortSignal,
                        conditions: options.conditions,
                        maxRetryRequests: options.maxRetryRequestsPerBlock,
                        customerProvidedKey: options.customerProvidedKey,
                        tracingOptions: updatedOptions.tracingOptions,
                    });
                    const stream = response.readableStreamBody;
                    await streamToBuffer(stream, buffer, off - offset, chunkEnd - offset);
                    // Update progress after block is downloaded, in case of block trying
                    // Could provide finer grained progress updating inside HTTP requests,
                    // only if convenience layer download try is enabled
                    transferProgress += chunkEnd - off;
                    if (options.onProgress) {
                        options.onProgress({ loadedBytes: transferProgress });
                    }
                });
            }
            await batch.do();
            return buffer;
        });
    }
    /**
     * ONLY AVAILABLE IN NODE.JS RUNTIME.
     *
     * Downloads an Azure Blob to a local file.
     * Fails if the given file path already exists.
     * Offset and count are optional, pass 0 and undefined respectively to download the entire blob.
     *
     * @param filePath -
     * @param offset - From which position of the block blob to download.
     * @param count - How much data to be downloaded. Will download to the end when passing undefined.
     * @param options - Options for the Blob download operation.
     * @returns The response data for blob download operation,
     *                                 but with readableStreamBody set to undefined since its
     *                                 content is already read and written into a local file
     *                                 at the specified path.
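     *
     * Example usage (a minimal sketch; `blobClient` and the local path are assumed placeholders):
     *
     * ```js
     * // Download the whole blob to "./downloaded.txt"; the file must not exist yet.
     * await blobClient.downloadToFile("./downloaded.txt");
     * ```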
     */
    async downloadToFile(filePath, offset = 0, count, options = {}) {
        return tracingClient.withSpan("BlobClient-downloadToFile", options, async (updatedOptions) => {
            const response = await this.download(offset, count, Object.assign(Object.assign({}, options), { tracingOptions: updatedOptions.tracingOptions }));
            if (response.readableStreamBody) {
                await readStreamToLocalFile(response.readableStreamBody, filePath);
            }
            // The stream is no longer accessible so setting it to undefined.
            response.blobDownloadStream = undefined;
            return response;
        });
    }
    getBlobAndContainerNamesFromUrl() {
        let containerName;
        let blobName;
        try {
            // URL may look like the following
            // "https://myaccount.blob.core.windows.net/mycontainer/blob?sasString";
            // "https://myaccount.blob.core.windows.net/mycontainer/blob";
            // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt?sasString";
            // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt";
            // IPv4/IPv6 address hosts, Endpoints - `http://127.0.0.1:10000/devstoreaccount1/containername/blob`
            // http://localhost:10001/devstoreaccount1/containername/blob
            const parsedUrl = new URL(this.url);
            if (parsedUrl.host.split(".")[1] === "blob") {
                // "https://myaccount.blob.core.windows.net/containername/blob".
                // .getPath() -> /containername/blob
                const pathComponents = parsedUrl.pathname.match("/([^/]*)(/(.*))?");
                containerName = pathComponents[1];
                blobName = pathComponents[3];
            }
            else if (isIpEndpointStyle(parsedUrl)) {
                // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/containername/blob
                // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/containername/blob
                // .getPath() -> /devstoreaccount1/containername/blob
                const pathComponents = parsedUrl.pathname.match("/([^/]*)/([^/]*)(/(.*))?");
                containerName = pathComponents[2];
                blobName = pathComponents[4];
            }
            else {
                // "https://customdomain.com/containername/blob".
                // .getPath() -> /containername/blob
                const pathComponents = parsedUrl.pathname.match("/([^/]*)(/(.*))?");
                containerName = pathComponents[1];
                blobName = pathComponents[3];
            }
            // decode the encoded blobName, containerName - to get all the special characters that might be present in them
            containerName = decodeURIComponent(containerName);
            blobName = decodeURIComponent(blobName);
            // Azure Storage Server will replace "\" with "/" in the blob names
            //   doing the same in the SDK side so that the user doesn't have to replace "\" instances in the blobName
            blobName = blobName.replace(/\\/g, "/");
            if (!containerName) {
                throw new Error("Provided containerName is invalid.");
            }
            return { blobName, containerName };
        }
        catch (error) {
            throw new Error("Unable to extract blobName and containerName with provided information.");
        }
    }
    /**
     * Asynchronously copies a blob to a destination within the storage account.
     * In version 2012-02-12 and later, the source for a Copy Blob operation can be
     * a committed blob in any Azure storage account.
     * Beginning with version 2015-02-21, the source for a Copy Blob operation can be
     * an Azure file in any Azure storage account.
     * Only storage accounts created on or after June 7th, 2012 allow the Copy Blob
     * operation to copy from another storage account.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob
     *
     * @param copySource - url to the source Azure Blob/File.
     * @param options - Optional options to the Blob Start Copy From URL operation.
     */
    async startCopyFromURL(copySource, options = {}) {
        return tracingClient.withSpan("BlobClient-startCopyFromURL", options, async (updatedOptions) => {
            var _a, _b, _c;
            options.conditions = options.conditions || {};
            options.sourceConditions = options.sourceConditions || {};
            return assertResponse(await this.blobContext.startCopyFromURL(copySource, {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                metadata: options.metadata,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                sourceModifiedAccessConditions: {
                    sourceIfMatch: options.sourceConditions.ifMatch,
                    sourceIfModifiedSince: options.sourceConditions.ifModifiedSince,
                    sourceIfNoneMatch: options.sourceConditions.ifNoneMatch,
                    sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince,
                    sourceIfTags: options.sourceConditions.tagConditions,
                },
                immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn,
                immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode,
                legalHold: options.legalHold,
                rehydratePriority: options.rehydratePriority,
                tier: toAccessTier(options.tier),
                blobTagsString: toBlobTagsString(options.tags),
                sealBlob: options.sealBlob,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Only available for BlobClient constructed with a shared key credential.
     *
     * Generates a Blob Service Shared Access Signature (SAS) URI based on the client properties
     * and parameters passed in. The SAS is signed by the shared key credential of the client.
     *
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
     *
     * @param options - Optional parameters.
     * @returns The SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token.
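     *
     * Example usage (a minimal sketch; assumes the client was built with a StorageSharedKeyCredential and
     * that `BlobSASPermissions` is imported from this package):
     *
     * ```js
     * const sasUrl = await blobClient.generateSasUrl({
     *   permissions: BlobSASPermissions.parse("r"), // read-only
     *   expiresOn: new Date(Date.now() + 60 * 60 * 1000), // one hour from now
     * });
     * console.log(sasUrl);
     * ```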
     */
    generateSasUrl(options) {
        return new Promise((resolve) => {
            if (!(this.credential instanceof StorageSharedKeyCredential)) {
                throw new RangeError("Can only generate the SAS when the client is initialized with a shared key credential");
            }
            const sas = generateBlobSASQueryParameters(Object.assign({ containerName: this._containerName, blobName: this._name, snapshotTime: this._snapshot, versionId: this._versionId }, options), this.credential).toString();
            resolve(appendToURLQuery(this.url, sas));
        });
    }
    /**
     * Delete the immutability policy on the blob.
     *
     * @param options - Optional options to delete immutability policy on the blob.
     */
    async deleteImmutabilityPolicy(options = {}) {
        return tracingClient.withSpan("BlobClient-deleteImmutabilityPolicy", options, async (updatedOptions) => {
            return assertResponse(await this.blobContext.deleteImmutabilityPolicy({
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Set immutability policy on the blob.
     *
     * @param options - Optional options to set immutability policy on the blob.
     */
    async setImmutabilityPolicy(immutabilityPolicy, options = {}) {
        return tracingClient.withSpan("BlobClient-setImmutabilityPolicy", options, async (updatedOptions) => {
            return assertResponse(await this.blobContext.setImmutabilityPolicy({
                immutabilityPolicyExpiry: immutabilityPolicy.expiriesOn,
                immutabilityPolicyMode: immutabilityPolicy.policyMode,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Set legal hold on the blob.
     *
     * @param options - Optional options to set legal hold on the blob.
     */
    async setLegalHold(legalHoldEnabled, options = {}) {
        return tracingClient.withSpan("BlobClient-setLegalHold", options, async (updatedOptions) => {
            return assertResponse(await this.blobContext.setLegalHold(legalHoldEnabled, {
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
}
/**
 * AppendBlobClient defines a set of operations applicable to append blobs.
 */
export class AppendBlobClient extends BlobClient {
    constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
    // Legacy, no fix for eslint error without breaking. Disable it for this interface.
    /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
    options) {
        // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead.
        // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options);
        let pipeline;
        let url;
        options = options || {};
        if (isPipelineLike(credentialOrPipelineOrContainerName)) {
            // (url: string, pipeline: Pipeline)
            url = urlOrConnectionString;
            pipeline = credentialOrPipelineOrContainerName;
        }
        else if ((isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) ||
            credentialOrPipelineOrContainerName instanceof AnonymousCredential ||
            isTokenCredential(credentialOrPipelineOrContainerName)) {
            // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
            url = urlOrConnectionString;
            options = blobNameOrOptions;
            pipeline = newPipeline(credentialOrPipelineOrContainerName, options);
        }
        else if (!credentialOrPipelineOrContainerName &&
            typeof credentialOrPipelineOrContainerName !== "string") {
            // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
            url = urlOrConnectionString;
            // The second parameter is undefined. Use anonymous credential.
            pipeline = newPipeline(new AnonymousCredential(), options);
        }
        else if (credentialOrPipelineOrContainerName &&
            typeof credentialOrPipelineOrContainerName === "string" &&
            blobNameOrOptions &&
            typeof blobNameOrOptions === "string") {
            // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions)
            const containerName = credentialOrPipelineOrContainerName;
            const blobName = blobNameOrOptions;
            const extractedCreds = extractConnectionStringParts(urlOrConnectionString);
            if (extractedCreds.kind === "AccountConnString") {
                if (isNode) {
                    const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey);
                    url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName));
                    if (!options.proxyOptions) {
                        options.proxyOptions = getDefaultProxySettings(extractedCreds.proxyUri);
                    }
                    pipeline = newPipeline(sharedKeyCredential, options);
                }
                else {
                    throw new Error("Account connection string is only supported in Node.js environment");
                }
            }
            else if (extractedCreds.kind === "SASConnString") {
                url =
                    appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) +
                        "?" +
                        extractedCreds.accountSas;
                pipeline = newPipeline(new AnonymousCredential(), options);
            }
            else {
                throw new Error("Connection string must be either an Account connection string or a SAS connection string");
            }
        }
        else {
            throw new Error("Expecting non-empty strings for containerName and blobName parameters");
        }
        super(url, pipeline);
        this.appendBlobContext = this.storageClientContext.appendBlob;
    }
    /**
     * Creates a new AppendBlobClient object identical to the source but with the
     * specified snapshot timestamp.
     * Providing "" will remove the snapshot and return a client to the base blob.
     *
     * @param snapshot - The snapshot timestamp.
     * @returns A new AppendBlobClient object identical to the source but with the specified snapshot timestamp.
     */
    withSnapshot(snapshot) {
        return new AppendBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline);
    }
    /**
     * Creates a 0-length append blob. Call AppendBlock to append data to an append blob.
     * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
     *
     * @param options - Options to the Append Block Create operation.
     *
     * Example usage:
     *
     * ```js
     * const appendBlobClient = containerClient.getAppendBlobClient("<blob name>");
     * await appendBlobClient.create();
     * ```
     */
    async create(options = {}) {
        options.conditions = options.conditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("AppendBlobClient-create", options, async (updatedOptions) => {
            var _a, _b, _c;
            return assertResponse(await this.appendBlobContext.create(0, {
                abortSignal: options.abortSignal,
                blobHttpHeaders: options.blobHTTPHeaders,
                leaseAccessConditions: options.conditions,
                metadata: options.metadata,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn,
                immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode,
                legalHold: options.legalHold,
                blobTagsString: toBlobTagsString(options.tags),
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Creates a 0-length append blob. Call AppendBlock to append data to an append blob.
     * If a blob with the same name already exists, the content of the existing blob will remain unchanged.
     * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
     *
     * @param options -
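     *
     * Example usage (a minimal sketch; `containerClient` is an assumed existing ContainerClient):
     *
     * ```js
     * const appendBlobClient = containerClient.getAppendBlobClient("<blob name>");
     * const createResponse = await appendBlobClient.createIfNotExists();
     * console.log(`Created a new blob: ${createResponse.succeeded}`);
     * ```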
     */
    async createIfNotExists(options = {}) {
        const conditions = { ifNoneMatch: ETagAny };
        return tracingClient.withSpan("AppendBlobClient-createIfNotExists", options, async (updatedOptions) => {
            var _a, _b;
            try {
                const res = assertResponse(await this.create(Object.assign(Object.assign({}, updatedOptions), { conditions })));
                return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response });
            }
            catch (e) {
                if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobAlreadyExists") {
                    return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response });
                }
                throw e;
            }
        });
    }
    /**
     * Seals the append blob, making it read only.
     *
     * @param options -
     */
    async seal(options = {}) {
        options.conditions = options.conditions || {};
        return tracingClient.withSpan("AppendBlobClient-seal", options, async (updatedOptions) => {
            var _a;
            return assertResponse(await this.appendBlobContext.seal({
                abortSignal: options.abortSignal,
                appendPositionAccessConditions: options.conditions,
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Commits a new block of data to the end of the existing append blob.
     * @see https://docs.microsoft.com/rest/api/storageservices/append-block
     *
     * @param body - Data to be appended.
     * @param contentLength - Length of the body in bytes.
     * @param options - Options to the Append Block operation.
     *
     * Example usage:
     *
     * ```js
     * const content = "Hello World!";
     *
     * // Create a new append blob and append data to the blob.
     * const newAppendBlobClient = containerClient.getAppendBlobClient("<blob name>");
     * await newAppendBlobClient.create();
     * await newAppendBlobClient.appendBlock(content, content.length);
     *
     * // Append data to an existing append blob.
     * const existingAppendBlobClient = containerClient.getAppendBlobClient("<blob name>");
     * await existingAppendBlobClient.appendBlock(content, content.length);
     * ```
     */
    async appendBlock(body, contentLength, options = {}) {
        options.conditions = options.conditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("AppendBlobClient-appendBlock", options, async (updatedOptions) => {
            var _a;
            return assertResponse(await this.appendBlobContext.appendBlock(contentLength, body, {
                abortSignal: options.abortSignal,
                appendPositionAccessConditions: options.conditions,
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                requestOptions: {
                    onUploadProgress: options.onProgress,
                },
                transactionalContentMD5: options.transactionalContentMD5,
                transactionalContentCrc64: options.transactionalContentCrc64,
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * The Append Block operation commits a new block of data to the end of an existing append blob
     * where the contents are read from a source url.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/append-block-from-url
     *
     * @param sourceURL -
     *                 The url to the blob that will be the source of the copy. A source blob in the same storage account can
     *                 be authenticated via Shared Key. However, if the source is a blob in another account, the source blob
     *                 must either be public or must be authenticated via a shared access signature. If the source blob is
     *                 public, no authentication is required to perform the operation.
     * @param sourceOffset - Offset in source to be appended
     * @param count - Number of bytes to be appended as a block
     * @param options -
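     *
     * Example usage (a minimal sketch; `appendBlobClient` and the SAS-authenticated source URL are assumed placeholders):
     *
     * ```js
     * // Append the first 1024 bytes of the source blob to this append blob.
     * await appendBlobClient.appendBlockFromURL(
     *   "https://myaccount.blob.core.windows.net/mycontainer/source.txt?<SAS>",
     *   0,    // sourceOffset
     *   1024  // count
     * );
     * ```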
     */
    async appendBlockFromURL(sourceURL, sourceOffset, count, options = {}) {
        options.conditions = options.conditions || {};
        options.sourceConditions = options.sourceConditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("AppendBlobClient-appendBlockFromURL", options, async (updatedOptions) => {
            var _a, _b, _c, _d, _e;
            return assertResponse(await this.appendBlobContext.appendBlockFromUrl(sourceURL, 0, {
                abortSignal: options.abortSignal,
                sourceRange: rangeToString({ offset: sourceOffset, count }),
                sourceContentMD5: options.sourceContentMD5,
                sourceContentCrc64: options.sourceContentCrc64,
                leaseAccessConditions: options.conditions,
                appendPositionAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                sourceModifiedAccessConditions: {
                    sourceIfMatch: (_b = options.sourceConditions) === null || _b === void 0 ? void 0 : _b.ifMatch,
                    sourceIfModifiedSince: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifModifiedSince,
                    sourceIfNoneMatch: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch,
                    sourceIfUnmodifiedSince: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.ifUnmodifiedSince,
                },
                copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization),
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
}
/**
 * BlockBlobClient defines a set of operations applicable to block blobs.
 */
export class BlockBlobClient extends BlobClient {
    constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
    // Legacy, no fix for eslint error without breaking. Disable it for this interface.
    /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
    options) {
        // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead.
        // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options);
        let pipeline;
        let url;
        options = options || {};
        if (isPipelineLike(credentialOrPipelineOrContainerName)) {
            // (url: string, pipeline: Pipeline)
            url = urlOrConnectionString;
            pipeline = credentialOrPipelineOrContainerName;
        }
        else if ((isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) ||
            credentialOrPipelineOrContainerName instanceof AnonymousCredential ||
            isTokenCredential(credentialOrPipelineOrContainerName)) {
            // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
            url = urlOrConnectionString;
            options = blobNameOrOptions;
            pipeline = newPipeline(credentialOrPipelineOrContainerName, options);
        }
        else if (!credentialOrPipelineOrContainerName &&
            typeof credentialOrPipelineOrContainerName !== "string") {
            // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
            // The second parameter is undefined. Use anonymous credential.
            url = urlOrConnectionString;
            pipeline = newPipeline(new AnonymousCredential(), options);
        }
        else if (credentialOrPipelineOrContainerName &&
            typeof credentialOrPipelineOrContainerName === "string" &&
            blobNameOrOptions &&
            typeof blobNameOrOptions === "string") {
            // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions)
            const containerName = credentialOrPipelineOrContainerName;
            const blobName = blobNameOrOptions;
            const extractedCreds = extractConnectionStringParts(urlOrConnectionString);
            if (extractedCreds.kind === "AccountConnString") {
                if (isNode) {
                    const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey);
                    url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName));
                    if (!options.proxyOptions) {
                        options.proxyOptions = getDefaultProxySettings(extractedCreds.proxyUri);
                    }
                    pipeline = newPipeline(sharedKeyCredential, options);
                }
                else {
                    throw new Error("Account connection string is only supported in Node.js environment");
                }
            }
            else if (extractedCreds.kind === "SASConnString") {
                url =
                    appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) +
                        "?" +
                        extractedCreds.accountSas;
                pipeline = newPipeline(new AnonymousCredential(), options);
            }
            else {
                throw new Error("Connection string must be either an Account connection string or a SAS connection string");
            }
        }
        else {
            throw new Error("Expecting non-empty strings for containerName and blobName parameters");
        }
        super(url, pipeline);
        this.blockBlobContext = this.storageClientContext.blockBlob;
        this._blobContext = this.storageClientContext.blob;
    }
    /**
     * Creates a new BlockBlobClient object identical to the source but with the
     * specified snapshot timestamp.
     * Providing "" will remove the snapshot and return a URL to the base blob.
     *
     * @param snapshot - The snapshot timestamp.
     * @returns A new BlockBlobClient object identical to the source but with the specified snapshot timestamp.
     */
    withSnapshot(snapshot) {
        return new BlockBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline);
    }
    /**
     * ONLY AVAILABLE IN NODE.JS RUNTIME.
     *
     * Quick query for a JSON or CSV formatted blob.
     *
     * Example usage (Node.js):
     *
     * ```js
     * // Query and convert a blob to a string
     * const queryBlockBlobResponse = await blockBlobClient.query("select * from BlobStorage");
     * const downloaded = (await streamToBuffer(queryBlockBlobResponse.readableStreamBody)).toString();
     * console.log("Query blob content:", downloaded);
     *
     * async function streamToBuffer(readableStream) {
     *   return new Promise((resolve, reject) => {
     *     const chunks = [];
     *     readableStream.on("data", (data) => {
     *       chunks.push(data instanceof Buffer ? data : Buffer.from(data));
     *     });
     *     readableStream.on("end", () => {
     *       resolve(Buffer.concat(chunks));
     *     });
     *     readableStream.on("error", reject);
     *   });
     * }
     * ```
     *
     * @param query -
     * @param options -
     */
    async query(query, options = {}) {
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        if (!isNode) {
            throw new Error("This operation currently is only supported in Node.js.");
        }
        return tracingClient.withSpan("BlockBlobClient-query", options, async (updatedOptions) => {
            var _a;
            const response = assertResponse(await this._blobContext.query({
                abortSignal: options.abortSignal,
                queryRequest: {
                    queryType: "SQL",
                    expression: query,
                    inputSerialization: toQuerySerialization(options.inputTextConfiguration),
                    outputSerialization: toQuerySerialization(options.outputTextConfiguration),
                },
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                cpkInfo: options.customerProvidedKey,
                tracingOptions: updatedOptions.tracingOptions,
            }));
            return new BlobQueryResponse(response, {
                abortSignal: options.abortSignal,
                onProgress: options.onProgress,
                onError: options.onError,
            });
        });
    }
    /**
     * Creates a new block blob, or updates the content of an existing block blob.
     * Updating an existing block blob overwrites any existing metadata on the blob.
     * Partial updates are not supported; the content of the existing blob is
     * overwritten with the new content. To perform a partial update of a block blob's contents,
     * use {@link stageBlock} and {@link commitBlockList}.
     *
     * This is a non-parallel uploading method, please use {@link uploadFile},
     * {@link uploadStream} or {@link uploadBrowserData} for better performance
     * with concurrent uploading.
     *
     * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
     *
     * @param body - Blob, string, ArrayBuffer, ArrayBufferView or a function
     *               which returns a new Readable stream whose offset is from data source beginning.
     * @param contentLength - Length of body in bytes. Use Buffer.byteLength() to calculate body length for a
     *                        string including non-Base64/Hex-encoded characters.
     * @param options - Options to the Block Blob Upload operation.
     * @returns Response data for the Block Blob Upload operation.
     *
     * Example usage:
     *
     * ```js
     * const content = "Hello world!";
     * const uploadBlobResponse = await blockBlobClient.upload(content, content.length);
     * ```
     */
    async upload(body, contentLength, options = {}) {
        options.conditions = options.conditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("BlockBlobClient-upload", options, async (updatedOptions) => {
            var _a, _b, _c;
            return assertResponse(await this.blockBlobContext.upload(contentLength, body, {
                abortSignal: options.abortSignal,
                blobHttpHeaders: options.blobHTTPHeaders,
                leaseAccessConditions: options.conditions,
                metadata: options.metadata,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                requestOptions: {
                    onUploadProgress: options.onProgress,
                },
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn,
                immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode,
                legalHold: options.legalHold,
                tier: toAccessTier(options.tier),
                blobTagsString: toBlobTagsString(options.tags),
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Creates a new Block Blob where the contents of the blob are read from a given URL.
     * This API is supported beginning with the 2020-04-08 version. Partial updates
     * are not supported with Put Blob from URL; the content of an existing blob is overwritten with
     * the content of the new blob. To perform partial updates to a block blob's contents using a
     * source URL, use {@link stageBlockFromURL} and {@link commitBlockList}.
     *
     * @param sourceURL - Specifies the URL of the blob. The value
     *                    may be a URL of up to 2 KB in length that specifies a blob.
     *                    The value should be URL-encoded as it would appear
     *                    in a request URI. The source blob must either be public
     *                    or must be authenticated via a shared access signature.
     *                    If the source blob is public, no authentication is required
     *                    to perform the operation. Here are some examples of source object URLs:
     *                    - https://myaccount.blob.core.windows.net/mycontainer/myblob
     *                    - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
     * @param options - Optional parameters.
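     *
     * Example usage (a minimal sketch; `blockBlobClient` and the source URL are assumed placeholders):
     *
     * ```js
     * // Create (or overwrite) this block blob from a publicly readable or SAS-authenticated source URL.
     * await blockBlobClient.syncUploadFromURL(
     *   "https://myaccount.blob.core.windows.net/mycontainer/source.txt?<SAS>"
     * );
     * ```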
     */
    async syncUploadFromURL(sourceURL, options = {}) {
        options.conditions = options.conditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("BlockBlobClient-syncUploadFromURL", options, async (updatedOptions) => {
            var _a, _b, _c, _d, _e, _f;
            return assertResponse(await this.blockBlobContext.putBlobFromUrl(0, sourceURL, Object.assign(Object.assign({}, options), { blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: {
                    sourceIfMatch: (_b = options.sourceConditions) === null || _b === void 0 ? void 0 : _b.ifMatch,
                    sourceIfModifiedSince: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifModifiedSince,
                    sourceIfNoneMatch: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch,
                    sourceIfUnmodifiedSince: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.ifUnmodifiedSince,
                    sourceIfTags: (_f = options.sourceConditions) === null || _f === void 0 ? void 0 : _f.tagConditions,
                }, cpkInfo: options.customerProvidedKey, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags), copySourceTags: options.copySourceTags, tracingOptions: updatedOptions.tracingOptions })));
        });
    }
    /**
     * Uploads the specified block to the block blob's "staging area" to be later
     * committed by a call to commitBlockList.
     * @see https://docs.microsoft.com/rest/api/storageservices/put-block
     *
     * @param blockId - A 64-byte value that is base64-encoded
     * @param body - Data to upload to the staging area.
     * @param contentLength - Number of bytes to upload.
     * @param options - Options to the Block Blob Stage Block operation.
     * @returns Response data for the Block Blob Stage Block operation.
     */
    async stageBlock(blockId, body, contentLength, options = {}) {
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("BlockBlobClient-stageBlock", options, async (updatedOptions) => {
            return assertResponse(await this.blockBlobContext.stageBlock(blockId, contentLength, body, {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                requestOptions: {
                    onUploadProgress: options.onProgress,
                },
                transactionalContentMD5: options.transactionalContentMD5,
                transactionalContentCrc64: options.transactionalContentCrc64,
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * The Stage Block From URL operation creates a new block to be committed as part
     * of a blob where the contents are read from a URL.
     * This API is available starting in version 2018-03-28.
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
     *
     * @param blockId - A 64-byte value that is base64-encoded
     * @param sourceURL - Specifies the URL of the blob. The value
     *                    may be a URL of up to 2 KB in length that specifies a blob.
     *                    The value should be URL-encoded as it would appear
     *                    in a request URI. The source blob must either be public
     *                    or must be authenticated via a shared access signature.
     *                    If the source blob is public, no authentication is required
     *                    to perform the operation. Here are some examples of source object URLs:
     *                    - https://myaccount.blob.core.windows.net/mycontainer/myblob
     *                    - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
     * @param offset - From which position of the blob to download, greater than or equal to 0
     * @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined
     * @param options - Options to the Block Blob Stage Block From URL operation.
     * @returns Response data for the Block Blob Stage Block From URL operation.
     */
    async stageBlockFromURL(blockId, sourceURL, offset = 0, count, options = {}) {
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("BlockBlobClient-stageBlockFromURL", options, async (updatedOptions) => {
            return assertResponse(await this.blockBlobContext.stageBlockFromURL(blockId, 0, sourceURL, {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                sourceContentMD5: options.sourceContentMD5,
                sourceContentCrc64: options.sourceContentCrc64,
                sourceRange: offset === 0 && !count ? undefined : rangeToString({ offset, count }),
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization),
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Writes a blob by specifying the list of block IDs that make up the blob.
     * In order to be written as part of a blob, a block must have been successfully written
     * to the server in a prior {@link stageBlock} operation. You can call {@link commitBlockList} to
     * update a blob by uploading only those blocks that have changed, then committing the new and existing
     * blocks together. Any blocks not specified in the block list are permanently deleted.
     * @see https://docs.microsoft.com/rest/api/storageservices/put-block-list
     *
     * @param blocks - Array of 64-byte values that are base64-encoded
     * @param options - Options to the Block Blob Commit Block List operation.
     * @returns Response data for the Block Blob Commit Block List operation.
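     *
     * Example usage (a minimal sketch; `blockBlobClient` is an assumed existing BlockBlobClient, and
     * the block IDs must be base64 strings of equal length):
     *
     * ```js
     * const blockIds = ["YmxvY2stMDAwMDAx", "YmxvY2stMDAwMDAy"]; // base64("block-000001"), base64("block-000002")
     * await blockBlobClient.stageBlock(blockIds[0], "Hello ", 6);
     * await blockBlobClient.stageBlock(blockIds[1], "World!", 6);
     * await blockBlobClient.commitBlockList(blockIds);
     * ```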
     */
    async commitBlockList(blocks, options = {}) {
        options.conditions = options.conditions || {};
        ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
        return tracingClient.withSpan("BlockBlobClient-commitBlockList", options, async (updatedOptions) => {
            var _a, _b, _c;
            return assertResponse(await this.blockBlobContext.commitBlockList({ latest: blocks }, {
                abortSignal: options.abortSignal,
                blobHttpHeaders: options.blobHTTPHeaders,
                leaseAccessConditions: options.conditions,
                metadata: options.metadata,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                cpkInfo: options.customerProvidedKey,
                encryptionScope: options.encryptionScope,
                immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn,
                immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode,
                legalHold: options.legalHold,
                tier: toAccessTier(options.tier),
                blobTagsString: toBlobTagsString(options.tags),
                tracingOptions: updatedOptions.tracingOptions,
            }));
        });
    }
    /**
     * Returns the list of blocks that have been uploaded as part of a block blob
     * using the specified block list filter.
     * @see https://docs.microsoft.com/rest/api/storageservices/get-block-list
     *
     * @param listType - Specifies whether to return the list of committed blocks,
     *                   the list of uncommitted blocks, or both lists together.
     * @param options - Options to the Block Blob Get Block List operation.
     * @returns Response data for the Block Blob Get Block List operation.
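     *
     * Example usage (a minimal sketch; `blockBlobClient` is an assumed existing BlockBlobClient):
     *
     * ```js
     * const blockList = await blockBlobClient.getBlockList("committed");
     * for (const block of blockList.committedBlocks) {
     *   console.log(`Block ${block.name} has ${block.size} bytes`);
     * }
     * ```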
     */
    async getBlockList(listType, options = {}) {
        return tracingClient.withSpan("BlockBlobClient-getBlockList", options, async (updatedOptions) => {
            var _a;
            const res = assertResponse(await this.blockBlobContext.getBlockList(listType, {
                abortSignal: options.abortSignal,
                leaseAccessConditions: options.conditions,
                modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
                tracingOptions: updatedOptions.tracingOptions,
            }));
            if (!res.committedBlocks) {
                res.committedBlocks = [];
            }
            if (!res.uncommittedBlocks) {
                res.uncommittedBlocks = [];
            }
            return res;
        });
    }
    // High level functions
    /**
     * Uploads a Buffer(Node.js)/Blob(browsers)/ArrayBuffer/ArrayBufferView object to a BlockBlob.
     *
     * When data length is no more than the specified {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is
     * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload.
     * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList}
     * to commit the block list.
     *
     * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is
     * `blobContentType`, enabling the browser to provide
     * functionality based on file type.
     *
     * @param data - Buffer(Node.js), Blob, ArrayBuffer or ArrayBufferView
     * @param options -
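     *
     * Example usage (a minimal sketch; `blockBlobClient` is an assumed existing BlockBlobClient):
     *
     * ```js
     * const data = Buffer.from("Hello, world!"); // a Buffer in Node.js; a Blob/ArrayBuffer also works in browsers
     * await blockBlobClient.uploadData(data, {
     *   blobHTTPHeaders: { blobContentType: "text/plain" },
     * });
     * ```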
     */
    async uploadData(data, options = {}) {
        return tracingClient.withSpan("BlockBlobClient-uploadData", options, async (updatedOptions) => {
            if (isNode) {
                let buffer;
                if (data instanceof Buffer) {
                    buffer = data;
                }
                else if (data instanceof ArrayBuffer) {
                    buffer = Buffer.from(data);
                }
                else {
                    data = data;
                    buffer = Buffer.from(data.buffer, data.byteOffset, data.byteLength);
                }
                return this.uploadSeekableInternal((offset, size) => buffer.slice(offset, offset + size), buffer.byteLength, updatedOptions);
            }
            else {
                const browserBlob = new Blob([data]);
                return this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions);
            }
        });
    }
    /**
     * ONLY AVAILABLE IN BROWSERS.
     *
     * Uploads a browser Blob/File/ArrayBuffer/ArrayBufferView object to a block blob.
     *
     * When the buffer length is less than or equal to 256MB, this method will use 1 upload call to finish the upload.
     * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call
     * {@link commitBlockList} to commit the block list.
     *
     * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is
     * `blobContentType`, enabling the browser to provide
     * functionality based on file type.
     *
     * @deprecated Use {@link uploadData} instead.
     *
     * @param browserData - Blob, File, ArrayBuffer or ArrayBufferView
     * @param options - Options to upload browser data.
     * @returns Response data for the Blob Upload operation.
     */
    async uploadBrowserData(browserData, options = {}) {
        return tracingClient.withSpan("BlockBlobClient-uploadBrowserData", options, async (updatedOptions) => {
            const browserBlob = new Blob([browserData]);
            return this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions);
        });
    }
/**
 *
 * Uploads data to block blob. Requires a bodyFactory as the data source,
 * which needs to return a {@link HttpRequestBody} object with the offset and size provided.
 *
 * When data length is no more than the specified {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is
 * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload.
 * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList}
 * to commit the block list.
 *
 * @param bodyFactory -
 * @param size - size of the data to upload.
 * @param options - Options to Upload to Block Blob operation.
 * @returns Response data for the Blob Upload operation.
 */
async uploadSeekableInternal(bodyFactory, size, options = {}) {
var _a, _b;
let blockSize = (_a = options.blockSize) !== null && _a !== void 0 ? _a : 0;
if (blockSize < 0 || blockSize > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES) {
throw new RangeError(`blockSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES}`);
}
const maxSingleShotSize = (_b = options.maxSingleShotSize) !== null && _b !== void 0 ? _b : BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES;
if (maxSingleShotSize < 0 || maxSingleShotSize > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
throw new RangeError(`maxSingleShotSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}`);
}
if (blockSize === 0) {
if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) {
throw new RangeError(`${size} is too large to upload to a block blob.`);
}
if (size > maxSingleShotSize) {
blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS);
if (blockSize < DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES) {
blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES;
}
}
}
if (!options.blobHTTPHeaders) {
options.blobHTTPHeaders = {};
}
if (!options.conditions) {
options.conditions = {};
}
return tracingClient.withSpan("BlockBlobClient-uploadSeekableInternal", options, async (updatedOptions) => {
if (size <= maxSingleShotSize) {
return assertResponse(await this.upload(bodyFactory(0, size), size, updatedOptions));
}
const numBlocks = Math.floor((size - 1) / blockSize) + 1;
if (numBlocks > BLOCK_BLOB_MAX_BLOCKS) {
throw new RangeError(`The buffer's size is too big or the BlockSize is too small;` +
`the number of blocks must be <= ${BLOCK_BLOB_MAX_BLOCKS}`);
}
const blockList = [];
const blockIDPrefix = generateUuid();
let transferProgress = 0;
const batch = new Batch(options.concurrency);
for (let i = 0; i < numBlocks; i++) {
batch.addOperation(async () => {
const blockID = generateBlockID(blockIDPrefix, i);
const start = blockSize * i;
const end = i === numBlocks - 1 ? size : start + blockSize;
const contentLength = end - start;
blockList.push(blockID);
await this.stageBlock(blockID, bodyFactory(start, contentLength), contentLength, {
abortSignal: options.abortSignal,
conditions: options.conditions,
encryptionScope: options.encryptionScope,
tracingOptions: updatedOptions.tracingOptions,
});
// Update progress after the block is successfully uploaded to the server, in case the block upload is retried.
// TODO: Hook with convenience layer progress event in finer level
transferProgress += contentLength;
if (options.onProgress) {
options.onProgress({
loadedBytes: transferProgress,
});
}
});
}
await batch.do();
return this.commitBlockList(blockList, updatedOptions);
});
}
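/*
 * A minimal sketch of the block-size selection above, assuming the usual service limits
 * (BLOCK_BLOB_MAX_BLOCKS = 50000, BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES = 256 MB single-shot cap,
 * DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES = 4 MB floor); the constant values are assumptions here,
 * only the arithmetic mirrors the code above.
 *
 * ```js
 * function pickBlockSize(size, maxSingleShotSize) {
 *   if (size <= maxSingleShotSize) return 0; // single upload call, no blocks needed
 *   // spread the data over at most 50000 blocks, but never below the 4 MB floor
 *   return Math.max(Math.ceil(size / 50000), 4 * 1024 * 1024);
 * }
 * // e.g. a 1 GiB payload: ceil(1073741824 / 50000) ≈ 21475 bytes, clamped up to 4 MiB blocks
 * ```
 */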
/**
 * ONLY AVAILABLE IN NODE.JS RUNTIME.
 *
 * Uploads a local file in blocks to a block blob.
 *
 * When the file size is less than or equal to 256MB, this method will use 1 upload call to finish the upload.
 * Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList
 * to commit the block list.
 *
 * @param filePath - Full path of local file
 * @param options - Options to Upload to Block Blob operation.
 * @returns Response data for the Blob Upload operation.
 */
async uploadFile(filePath, options = {}) {
return tracingClient.withSpan("BlockBlobClient-uploadFile", options, async (updatedOptions) => {
const size = (await fsStat(filePath)).size;
return this.uploadSeekableInternal((offset, count) => {
return () => fsCreateReadStream(filePath, {
autoClose: true,
end: count ? offset + count - 1 : Infinity,
start: offset,
});
}, size, Object.assign(Object.assign({}, options), { tracingOptions: updatedOptions.tracingOptions }));
});
}
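/*
 * A minimal usage sketch for uploadFile (Node.js only), assuming a hypothetical connection
 * string, container name "container" and local file "./data.bin"; the option names shown are
 * the documented BlockBlobParallelUploadOptions fields.
 *
 * ```js
 * const { BlobServiceClient } = require("@azure/storage-blob");
 * const service = BlobServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING);
 * const blockBlobClient = service.getContainerClient("container").getBlockBlobClient("data.bin");
 * await blockBlobClient.uploadFile("./data.bin", {
 *   blockSize: 4 * 1024 * 1024,                      // 4 MiB blocks once the single-shot limit is exceeded
 *   concurrency: 20,                                 // parallel stageBlock calls
 *   onProgress: (ev) => console.log(ev.loadedBytes), // fires after each block upload
 *   blobHTTPHeaders: { blobContentType: "application/octet-stream" },
 * });
 * ```
 */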
/**
 * ONLY AVAILABLE IN NODE.JS RUNTIME.
 *
 * Uploads a Node.js Readable stream into block blob.
 *
 * PERFORMANCE IMPROVEMENT TIPS:
 * * Set the input stream's highWaterMark to the same value as the bufferSize
 * parameter; this avoids Buffer.concat() operations.
 *
 * @param stream - Node.js Readable stream
 * @param bufferSize - Size of every buffer allocated, also the block size in the uploaded block blob. Default value is 8MB
 * @param maxConcurrency - Max number of buffers that can be allocated; it is positively
 * correlated with the maximum upload concurrency. Default value is 5
 * @param options - Options to Upload Stream to Block Blob operation.
 * @returns Response data for the Blob Upload operation.
 */
async uploadStream(stream, bufferSize = DEFAULT_BLOCK_BUFFER_SIZE_BYTES, maxConcurrency = 5, options = {}) {
if (!options.blobHTTPHeaders) {
options.blobHTTPHeaders = {};
}
if (!options.conditions) {
options.conditions = {};
}
return tracingClient.withSpan("BlockBlobClient-uploadStream", options, async (updatedOptions) => {
let blockNum = 0;
const blockIDPrefix = generateUuid();
let transferProgress = 0;
const blockList = [];
const scheduler = new BufferScheduler(stream, bufferSize, maxConcurrency, async (body, length) => {
const blockID = generateBlockID(blockIDPrefix, blockNum);
blockList.push(blockID);
blockNum++;
await this.stageBlock(blockID, body, length, {
conditions: options.conditions,
encryptionScope: options.encryptionScope,
tracingOptions: updatedOptions.tracingOptions,
});
// Update progress after the block is successfully uploaded to the server, in case the block upload is retried.
transferProgress += length;
if (options.onProgress) {
options.onProgress({ loadedBytes: transferProgress });
}
},
// Concurrency should be set to a value smaller than maxConcurrency; this reduces the chance
// that an outgoing handler has to wait for stream data (when that happens, outgoing handlers are blocked).
// The outgoing queue shouldn't be empty.
Math.ceil((maxConcurrency / 4) * 3));
await scheduler.do();
return assertResponse(await this.commitBlockList(blockList, Object.assign(Object.assign({}, options), { tracingOptions: updatedOptions.tracingOptions })));
});
}
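/*
 * A minimal usage sketch for uploadStream, assuming a hypothetical blockBlobClient and local
 * file; it follows the tip above by giving the readable stream the same highWaterMark as the
 * bufferSize argument so the scheduler never has to Buffer.concat() partial chunks.
 *
 * ```js
 * const fs = require("fs");
 * const bufferSize = 8 * 1024 * 1024; // 8 MiB buffers == 8 MiB blocks
 * const stream = fs.createReadStream("./data.bin", { highWaterMark: bufferSize });
 * await blockBlobClient.uploadStream(stream, bufferSize, 5, {
 *   onProgress: (ev) => console.log(`${ev.loadedBytes} bytes staged`),
 * });
 * ```
 */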
}
/**
 * PageBlobClient defines a set of operations applicable to page blobs.
 */
export class PageBlobClient extends BlobClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
// In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead.
// super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options);
let pipeline;
let url;
options = options || {};
if (isPipelineLike(credentialOrPipelineOrContainerName)) {
// (url: string, pipeline: Pipeline)
url = urlOrConnectionString;
pipeline = credentialOrPipelineOrContainerName;
}
else if ((isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) ||
credentialOrPipelineOrContainerName instanceof AnonymousCredential ||
isTokenCredential(credentialOrPipelineOrContainerName)) {
// (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
url = urlOrConnectionString;
options = blobNameOrOptions;
pipeline = newPipeline(credentialOrPipelineOrContainerName, options);
}
else if (!credentialOrPipelineOrContainerName &&
typeof credentialOrPipelineOrContainerName !== "string") {
// (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions)
// The second parameter is undefined. Use anonymous credential.
url = urlOrConnectionString;
pipeline = newPipeline(new AnonymousCredential(), options);
}
else if (credentialOrPipelineOrContainerName &&
typeof credentialOrPipelineOrContainerName === "string" &&
blobNameOrOptions &&
typeof blobNameOrOptions === "string") {
// (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions)
const containerName = credentialOrPipelineOrContainerName;
const blobName = blobNameOrOptions;
const extractedCreds = extractConnectionStringParts(urlOrConnectionString);
if (extractedCreds.kind === "AccountConnString") {
if (isNode) {
const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey);
url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName));
if (!options.proxyOptions) {
options.proxyOptions = getDefaultProxySettings(extractedCreds.proxyUri);
}
pipeline = newPipeline(sharedKeyCredential, options);
}
else {
throw new Error("Account connection string is only supported in Node.js environment");
}
}
else if (extractedCreds.kind === "SASConnString") {
url =
appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) +
"?" +
extractedCreds.accountSas;
pipeline = newPipeline(new AnonymousCredential(), options);
}
else {
throw new Error("Connection string must be either an Account connection string or a SAS connection string");
}
}
else {
throw new Error("Expecting non-empty strings for containerName and blobName parameters");
}
super(url, pipeline);
this.pageBlobContext = this.storageClientContext.pageBlob;
}
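/*
 * A minimal sketch of the three constructor shapes handled above, with placeholder account,
 * container and blob names (all hypothetical):
 *
 * ```js
 * // 1. URL + credential
 * const a = new PageBlobClient("https://myaccount.blob.core.windows.net/container/page.vhd", new AnonymousCredential());
 * // 2. connection string + container name + blob name (Node.js only for account connection strings)
 * const b = new PageBlobClient(process.env.AZURE_STORAGE_CONNECTION_STRING, "container", "page.vhd");
 * // 3. URL + an already-built pipeline
 * const c = new PageBlobClient("https://myaccount.blob.core.windows.net/container/page.vhd", newPipeline(new AnonymousCredential()));
 * ```
 */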
/**
 * Creates a new PageBlobClient object identical to the source but with the
 * specified snapshot timestamp.
 * Provide "" to remove the snapshot and return a client to the base blob.
 *
 * @param snapshot - The snapshot timestamp.
 * @returns A new PageBlobClient object identical to the source but with the specified snapshot timestamp.
 */
withSnapshot(snapshot) {
return new PageBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline);
}
/**
 * Creates a page blob of the specified length. Call uploadPages to upload
 * data to a page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
 *
 * @param size - size of the page blob.
 * @param options - Options to the Page Blob Create operation.
 * @returns Response data for the Page Blob Create operation.
 */
async create(size, options = {}) {
options.conditions = options.conditions || {};
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
return tracingClient.withSpan("PageBlobClient-create", options, async (updatedOptions) => {
var _a, _b, _c;
return assertResponse(await this.pageBlobContext.create(0, size, {
abortSignal: options.abortSignal,
blobHttpHeaders: options.blobHTTPHeaders,
blobSequenceNumber: options.blobSequenceNumber,
leaseAccessConditions: options.conditions,
metadata: options.metadata,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
cpkInfo: options.customerProvidedKey,
encryptionScope: options.encryptionScope,
immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn,
immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode,
legalHold: options.legalHold,
tier: toAccessTier(options.tier),
blobTagsString: toBlobTagsString(options.tags),
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * Creates a page blob of the specified length. Call uploadPages to upload
 * data to a page blob. If the blob with the same name already exists, the content
 * of the existing blob will remain unchanged.
 * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
 *
 * @param size - size of the page blob.
 * @param options -
 */
async createIfNotExists(size, options = {}) {
return tracingClient.withSpan("PageBlobClient-createIfNotExists", options, async (updatedOptions) => {
var _a, _b;
try {
const conditions = { ifNoneMatch: ETagAny };
const res = assertResponse(await this.create(size, Object.assign(Object.assign({}, options), { conditions, tracingOptions: updatedOptions.tracingOptions })));
return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response });
}
catch (e) {
if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobAlreadyExists") {
return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response });
}
throw e;
}
});
}
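/*
 * A minimal sketch tying create/createIfNotExists to the 512-byte page granularity, using a
 * hypothetical pageBlobClient; sizes and offsets must be multiples of 512 or the service rejects
 * the request.
 *
 * ```js
 * const size = 1024 * 512;                  // total blob size, 512-aligned
 * await pageBlobClient.createIfNotExists(size);
 * const page = Buffer.alloc(512, "a");      // one 512-byte page
 * await pageBlobClient.uploadPages(page, 0, page.length);
 * ```
 */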
/**
 * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512.
 * @see https://docs.microsoft.com/rest/api/storageservices/put-page
 *
 * @param body - Data to upload
 * @param offset - Offset of destination page blob
 * @param count - Content length of the body, also number of bytes to be uploaded
 * @param options - Options to the Page Blob Upload Pages operation.
 * @returns Response data for the Page Blob Upload Pages operation.
 */
async uploadPages(body, offset, count, options = {}) {
options.conditions = options.conditions || {};
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
return tracingClient.withSpan("PageBlobClient-uploadPages", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.uploadPages(count, body, {
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
requestOptions: {
onUploadProgress: options.onProgress,
},
range: rangeToString({ offset, count }),
sequenceNumberAccessConditions: options.conditions,
transactionalContentMD5: options.transactionalContentMD5,
transactionalContentCrc64: options.transactionalContentCrc64,
cpkInfo: options.customerProvidedKey,
encryptionScope: options.encryptionScope,
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * The Upload Pages operation writes a range of pages to a page blob where the
 * contents are read from a URL.
 * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-page-from-url
 *
 * @param sourceURL - Specify a URL to the copy source; a Shared Access Signature (SAS) may be needed for authentication
 * @param sourceOffset - The source offset to copy from. Pass 0 to copy from the beginning of source page blob
 * @param destOffset - Offset of destination page blob
 * @param count - Number of bytes to be uploaded from source page blob
 * @param options -
 */
async uploadPagesFromURL(sourceURL, sourceOffset, destOffset, count, options = {}) {
options.conditions = options.conditions || {};
options.sourceConditions = options.sourceConditions || {};
ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps);
return tracingClient.withSpan("PageBlobClient-uploadPagesFromURL", options, async (updatedOptions) => {
var _a, _b, _c, _d, _e;
return assertResponse(await this.pageBlobContext.uploadPagesFromURL(sourceURL, rangeToString({ offset: sourceOffset, count }), 0, rangeToString({ offset: destOffset, count }), {
abortSignal: options.abortSignal,
sourceContentMD5: options.sourceContentMD5,
sourceContentCrc64: options.sourceContentCrc64,
leaseAccessConditions: options.conditions,
sequenceNumberAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
sourceModifiedAccessConditions: {
sourceIfMatch: (_b = options.sourceConditions) === null || _b === void 0 ? void 0 : _b.ifMatch,
sourceIfModifiedSince: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifModifiedSince,
sourceIfNoneMatch: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch,
sourceIfUnmodifiedSince: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.ifUnmodifiedSince,
},
cpkInfo: options.customerProvidedKey,
encryptionScope: options.encryptionScope,
copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization),
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
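/*
 * A minimal sketch for uploadPagesFromURL, assuming a hypothetical source page blob readable via
 * SAS; both the source and destination ranges must be 512-aligned and of equal length.
 *
 * ```js
 * const sourceUrl = "https://myaccount.blob.core.windows.net/container/source.vhd?<SAS>";
 * // copy the first MiB of the source into the destination starting at offset 0
 * await pageBlobClient.uploadPagesFromURL(sourceUrl, 0, 0, 1024 * 1024);
 * ```
 */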
/**
 * Frees the specified pages from the page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/put-page
 *
 * @param offset - Starting byte position of the pages to clear.
 * @param count - Number of bytes to clear.
 * @param options - Options to the Page Blob Clear Pages operation.
 * @returns Response data for the Page Blob Clear Pages operation.
 */
async clearPages(offset = 0, count, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-clearPages", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.clearPages(0, {
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
range: rangeToString({ offset, count }),
sequenceNumberAccessConditions: options.conditions,
cpkInfo: options.customerProvidedKey,
encryptionScope: options.encryptionScope,
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * Returns the list of valid page ranges for a page blob or snapshot of a page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param options - Options to the Page Blob Get Ranges operation.
 * @returns Response data for the Page Blob Get Ranges operation.
 */
async getPageRanges(offset = 0, count, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-getPageRanges", options, async (updatedOptions) => {
var _a;
const response = assertResponse(await this.pageBlobContext.getPageRanges({
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
range: rangeToString({ offset, count }),
tracingOptions: updatedOptions.tracingOptions,
}));
return rangeResponseFromModel(response);
});
}
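/*
 * A minimal sketch for getPageRanges, assuming a hypothetical pageBlobClient and assuming the
 * converted response exposes pageRange entries shaped like the { offset, count } Range helper.
 *
 * ```js
 * const ranges = await pageBlobClient.getPageRanges(0, 64 * 1024 * 1024);
 * for (const r of ranges.pageRange ?? []) {
 *   console.log(`valid pages at offset ${r.offset}, ${r.count} bytes`);
 * }
 * ```
 */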
/**
 * getPageRangesSegment returns a single segment of page ranges starting from the
 * specified Marker. Use an empty Marker to start enumeration from the beginning.
 * After getting a segment, process it, and then call getPageRangesSegment again
 * (passing the previously-returned Marker) to get the next segment.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param marker - A string value that identifies the portion of the list to be returned with the next list operation.
 * @param options - Options to PageBlob Get Page Ranges Segment operation.
 */
async listPageRangesSegment(offset = 0, count, marker, options = {}) {
return tracingClient.withSpan("PageBlobClient-getPageRangesSegment", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.getPageRanges({
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
range: rangeToString({ offset, count }),
marker: marker,
maxPageSize: options.maxPageSize,
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesResponseModel}
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param marker - A string value that identifies the portion of
 * the list of page ranges to be returned with the next listing operation. The
 * operation returns the ContinuationToken value within the response body if the
 * listing operation did not return all page ranges remaining within the current page.
 * The ContinuationToken value can be used as the value for
 * the marker parameter in a subsequent call to request the next page of list
 * items. The marker value is opaque to the client.
 * @param options - Options to List Page Ranges operation.
 */
listPageRangeItemSegments(offset = 0, count, marker, options = {}) {
return __asyncGenerator(this, arguments, function* listPageRangeItemSegments_1() {
let getPageRangeItemSegmentsResponse;
if (!!marker || marker === undefined) {
do {
getPageRangeItemSegmentsResponse = yield __await(this.listPageRangesSegment(offset, count, marker, options));
marker = getPageRangeItemSegmentsResponse.continuationToken;
yield yield __await(yield __await(getPageRangeItemSegmentsResponse));
} while (marker);
}
});
}
/**
 * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param options - Options to List Page Ranges operation.
 */
listPageRangeItems(offset = 0, count, options = {}) {
return __asyncGenerator(this, arguments, function* listPageRangeItems_1() {
var _a, e_1, _b, _c;
let marker;
try {
for (var _d = true, _e = __asyncValues(this.listPageRangeItemSegments(offset, count, marker, options)), _f; _f = yield __await(_e.next()), _a = _f.done, !_a;) {
_c = _f.value;
_d = false;
try {
const getPageRangesSegment = _c;
yield __await(yield* __asyncDelegator(__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment))));
}
finally {
_d = true;
}
}
}
catch (e_1_1) { e_1 = { error: e_1_1 }; }
finally {
try {
if (!_d && !_a && (_b = _e.return)) yield __await(_b.call(_e));
}
finally { if (e_1) throw e_1.error; }
}
});
}
/**
 * Returns an async iterable iterator to list of page ranges for a page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * .byPage() returns an async iterable iterator to list of page ranges for a page blob.
 *
 * Example using `for await` syntax:
 *
 * ```js
 * // Get the pageBlobClient before you run these snippets,
 * // Can be obtained from `blobServiceClient.getContainerClient("<your-container-name>").getPageBlobClient("<your-blob-name>");`
 * let i = 1;
 * for await (const pageRange of pageBlobClient.listPageRanges()) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 * ```
 *
 * Example using `iter.next()`:
 *
 * ```js
 * let i = 1;
 * let iter = pageBlobClient.listPageRanges();
 * let pageRangeItem = await iter.next();
 * while (!pageRangeItem.done) {
 *   console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`);
 *   pageRangeItem = await iter.next();
 * }
 * ```
 *
 * Example using `byPage()`:
 *
 * ```js
 * // passing optional maxPageSize in the page settings
 * let i = 1;
 * for await (const response of pageBlobClient.listPageRanges().byPage({ maxPageSize: 20 })) {
 *   for (const pageRange of response) {
 *     console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 *   }
 * }
 * ```
 *
 * Example using paging with a marker:
 *
 * ```js
 * let i = 1;
 * let iterator = pageBlobClient.listPageRanges().byPage({ maxPageSize: 2 });
 * let response = (await iterator.next()).value;
 *
 * // Prints 2 page ranges
 * for (const pageRange of response) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 *
 * // Gets next marker
 * let marker = response.continuationToken;
 *
 * // Passing next marker as continuationToken
 *
 * iterator = pageBlobClient.listPageRanges().byPage({ continuationToken: marker, maxPageSize: 10 });
 * response = (await iterator.next()).value;
 *
 * // Prints 10 page ranges
 * for (const pageRange of response) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 * ```
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param options - Options to the Page Blob Get Ranges operation.
 * @returns An asyncIterableIterator that supports paging.
 */
listPageRanges(offset = 0, count, options = {}) {
options.conditions = options.conditions || {};
// AsyncIterableIterator to iterate over blobs
const iter = this.listPageRangeItems(offset, count, options);
return {
/**
 * The next method, part of the iteration protocol
 */
next() {
return iter.next();
},
/**
 * The connection to the async iterator, part of the iteration protocol
 */
[Symbol.asyncIterator]() {
return this;
},
/**
 * Return an AsyncIterableIterator that works a page at a time
 */
byPage: (settings = {}) => {
return this.listPageRangeItemSegments(offset, count, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options));
},
};
}
/**
 * Gets the collection of page ranges that differ between a specified snapshot and this page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * @param offset - Starting byte position of the page blob
 * @param count - Number of bytes to get ranges diff.
 * @param prevSnapshot - Timestamp of snapshot to retrieve the difference.
 * @param options - Options to the Page Blob Get Page Ranges Diff operation.
 * @returns Response data for the Page Blob Get Page Range Diff operation.
 */
async getPageRangesDiff(offset, count, prevSnapshot, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-getPageRangesDiff", options, async (updatedOptions) => {
var _a;
const result = assertResponse(await this.pageBlobContext.getPageRangesDiff({
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
prevsnapshot: prevSnapshot,
range: rangeToString({ offset, count }),
tracingOptions: updatedOptions.tracingOptions,
}));
return rangeResponseFromModel(result);
});
}
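/*
 * A minimal sketch of a snapshot-diff workflow around getPageRangesDiff, assuming a hypothetical
 * pageBlobClient and that createSnapshot is available from the BlobClient base class.
 *
 * ```js
 * const { snapshot } = await pageBlobClient.createSnapshot();       // baseline
 * await pageBlobClient.uploadPages(Buffer.alloc(512, "b"), 0, 512); // mutate the base blob
 * const diff = await pageBlobClient.getPageRangesDiff(0, 4096, snapshot);
 * console.log(diff.pageRange, diff.clearRange);                     // ranges written/cleared since the snapshot
 * ```
 */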
/**
 * getPageRangesDiffSegment returns a single segment of page ranges starting from the
 * specified Marker for difference between previous snapshot and the target page blob.
 * Use an empty Marker to start enumeration from the beginning.
 * After getting a segment, process it, and then call getPageRangesDiffSegment again
 * (passing the previously-returned Marker) to get the next segment.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference.
 * @param marker - A string value that identifies the portion of the get to be returned with the next get operation.
 * @param options - Options to the Page Blob Get Page Ranges Diff operation.
 */
async listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options = {}) {
return tracingClient.withSpan("PageBlobClient-getPageRangesDiffSegment", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.getPageRangesDiff({
abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal,
leaseAccessConditions: options === null || options === void 0 ? void 0 : options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options === null || options === void 0 ? void 0 : options.conditions), { ifTags: (_a = options === null || options === void 0 ? void 0 : options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
prevsnapshot: prevSnapshotOrUrl,
range: rangeToString({
offset: offset,
count: count,
}),
marker: marker,
maxPageSize: options === null || options === void 0 ? void 0 : options.maxPageSize,
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesDiffResponseModel}
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference.
 * @param marker - A string value that identifies the portion of
 * the list of page ranges to be returned with the next listing operation. The
 * operation returns the ContinuationToken value within the response body if the
 * listing operation did not return all page ranges remaining within the current page.
 * The ContinuationToken value can be used as the value for
 * the marker parameter in a subsequent call to request the next page of list
 * items. The marker value is opaque to the client.
 * @param options - Options to the Page Blob Get Page Ranges Diff operation.
 */
listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options) {
return __asyncGenerator(this, arguments, function* listPageRangeDiffItemSegments_1() {
let getPageRangeItemSegmentsResponse;
if (!!marker || marker === undefined) {
do {
getPageRangeItemSegmentsResponse = yield __await(this.listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options));
marker = getPageRangeItemSegmentsResponse.continuationToken;
yield yield __await(yield __await(getPageRangeItemSegmentsResponse));
} while (marker);
}
});
}
/**
 * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects
 *
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference.
 * @param options - Options to the Page Blob Get Page Ranges Diff operation.
 */
listPageRangeDiffItems(offset, count, prevSnapshotOrUrl, options) {
return __asyncGenerator(this, arguments, function* listPageRangeDiffItems_1() {
var _a, e_2, _b, _c;
let marker;
try {
for (var _d = true, _e = __asyncValues(this.listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options)), _f; _f = yield __await(_e.next()), _a = _f.done, !_a;) {
_c = _f.value;
_d = false;
try {
const getPageRangesSegment = _c;
yield __await(yield* __asyncDelegator(__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment))));
}
finally {
_d = true;
}
}
}
catch (e_2_1) { e_2 = { error: e_2_1 }; }
finally {
try {
if (!_d && !_a && (_b = _e.return)) yield __await(_b.call(_e));
}
finally { if (e_2) throw e_2.error; }
}
});
}
/**
 * Returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * .byPage() returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob.
 *
 * Example using `for await` syntax:
 *
 * ```js
 * // Get the pageBlobClient before you run these snippets,
 * // Can be obtained from `blobServiceClient.getContainerClient("<your-container-name>").getPageBlobClient("<your-blob-name>");`
 * let i = 1;
 * for await (const pageRange of pageBlobClient.listPageRangesDiff()) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 * ```
 *
 * Example using `iter.next()`:
 *
 * ```js
 * let i = 1;
 * let iter = pageBlobClient.listPageRangesDiff();
 * let pageRangeItem = await iter.next();
 * while (!pageRangeItem.done) {
 *   console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`);
 *   pageRangeItem = await iter.next();
 * }
 * ```
 *
 * Example using `byPage()`:
 *
 * ```js
 * // passing optional maxPageSize in the page settings
 * let i = 1;
 * for await (const response of pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 20 })) {
 *   for (const pageRange of response) {
 *     console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 *   }
 * }
 * ```
 *
 * Example using paging with a marker:
 *
 * ```js
 * let i = 1;
 * let iterator = pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 2 });
 * let response = (await iterator.next()).value;
 *
 * // Prints 2 page ranges
 * for (const pageRange of response) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 *
 * // Gets next marker
 * let marker = response.continuationToken;
 *
 * // Passing next marker as continuationToken
 *
 * iterator = pageBlobClient.listPageRangesDiff().byPage({ continuationToken: marker, maxPageSize: 10 });
 * response = (await iterator.next()).value;
 *
 * // Prints 10 page ranges
 * for (const pageRange of response) {
 *   console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`);
 * }
 * ```
 * @param offset - Starting byte position of the page ranges.
 * @param count - Number of bytes to get.
 * @param prevSnapshot - Timestamp of snapshot to retrieve the difference.
 * @param options - Options to the Page Blob Get Ranges operation.
 * @returns An asyncIterableIterator that supports paging.
 */
listPageRangesDiff(offset, count, prevSnapshot, options = {}) {
options.conditions = options.conditions || {};
// AsyncIterableIterator to iterate over blobs
const iter = this.listPageRangeDiffItems(offset, count, prevSnapshot, Object.assign({}, options));
return {
/**
 * The next method, part of the iteration protocol
 */
next() {
return iter.next();
},
/**
 * The connection to the async iterator, part of the iteration protocol
 */
[Symbol.asyncIterator]() {
return this;
},
/**
 * Return an AsyncIterableIterator that works a page at a time
 */
byPage: (settings = {}) => {
return this.listPageRangeDiffItemSegments(offset, count, prevSnapshot, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options));
},
};
}
/**
 * Gets the collection of page ranges that differ between a specified snapshot and this page blob for managed disks.
 * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
 *
 * @param offset - Starting byte position of the page blob
 * @param count - Number of bytes to get ranges diff.
 * @param prevSnapshotUrl - URL of snapshot to retrieve the difference.
 * @param options - Options to the Page Blob Get Page Ranges Diff operation.
 * @returns Response data for the Page Blob Get Page Range Diff operation.
 */
async getPageRangesDiffForManagedDisks(offset, count, prevSnapshotUrl, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-GetPageRangesDiffForManagedDisks", options, async (updatedOptions) => {
var _a;
const response = assertResponse(await this.pageBlobContext.getPageRangesDiff({
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
prevSnapshotUrl,
range: rangeToString({ offset, count }),
tracingOptions: updatedOptions.tracingOptions,
}));
return rangeResponseFromModel(response);
});
}
/**
 * Resizes the page blob to the specified size (which must be a multiple of 512).
 * @see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties
 *
 * @param size - Target size
 * @param options - Options to the Page Blob Resize operation.
 * @returns Response data for the Page Blob Resize operation.
 */
async resize(size, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-resize", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.resize(size, {
abortSignal: options.abortSignal,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
encryptionScope: options.encryptionScope,
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
/**
 * Sets a page blob's sequence number.
 * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties
 *
 * @param sequenceNumberAction - Indicates how the service should modify the blob's sequence number.
 * @param sequenceNumber - Required if sequenceNumberAction is max or update
 * @param options - Options to the Page Blob Update Sequence Number operation.
 * @returns Response data for the Page Blob Update Sequence Number operation.
 */
async updateSequenceNumber(sequenceNumberAction, sequenceNumber, options = {}) {
options.conditions = options.conditions || {};
return tracingClient.withSpan("PageBlobClient-updateSequenceNumber", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.updateSequenceNumber(sequenceNumberAction, {
abortSignal: options.abortSignal,
blobSequenceNumber: sequenceNumber,
leaseAccessConditions: options.conditions,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
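/*
 * A minimal sketch for updateSequenceNumber, assuming the documented actions "increment",
 * "update" and "max"; the conditional write below relies on the sequence-number access
 * conditions accepted by uploadPages (an assumption about the options shape).
 *
 * ```js
 * await pageBlobClient.updateSequenceNumber("update", 100);
 * // only write if the blob's sequence number is still below 200
 * await pageBlobClient.uploadPages(Buffer.alloc(512), 0, 512, {
 *   conditions: { ifSequenceNumberLessThan: 200 },
 * });
 * ```
 */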
/**
 * Begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
 * The snapshot is copied such that only the differential changes since the previously
 * copied snapshot are transferred to the destination.
 * The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
 * @see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob
 * @see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots
 *
 * @param copySource - Specifies the name of the source page blob snapshot. For example,
 * https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
 * @param options - Options to the Page Blob Copy Incremental operation.
 * @returns Response data for the Page Blob Copy Incremental operation.
 */
async startCopyIncremental(copySource, options = {}) {
return tracingClient.withSpan("PageBlobClient-startCopyIncremental", options, async (updatedOptions) => {
var _a;
return assertResponse(await this.pageBlobContext.copyIncremental(copySource, {
abortSignal: options.abortSignal,
modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }),
tracingOptions: updatedOptions.tracingOptions,
}));
});
}
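/*
 * A minimal sketch for startCopyIncremental, assuming a hypothetical destPageBlobClient and a
 * source page blob snapshot URL readable by the destination (e.g. via SAS); each call copies
 * only the pages that changed since the previous incremental copy.
 *
 * ```js
 * const copySource = "https://myaccount.blob.core.windows.net/container/source.vhd?snapshot=<DateTime>&<SAS>";
 * const response = await destPageBlobClient.startCopyIncremental(copySource);
 * console.log(response.copyId, response.copyStatus); // typically "pending" until the service completes the copy
 * ```
 */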
}
//# sourceMappingURL=Clients.js.map