opendal 0.1.6.pre.rc.1-arm64-darwin-23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.standard.yml +20 -0
- data/.tool-versions +1 -0
- data/.yardopts +1 -0
- data/DEPENDENCIES.md +9 -0
- data/DEPENDENCIES.rust.tsv +277 -0
- data/Gemfile +35 -0
- data/README.md +159 -0
- data/Rakefile +149 -0
- data/core/CHANGELOG.md +4929 -0
- data/core/CONTRIBUTING.md +61 -0
- data/core/DEPENDENCIES.md +3 -0
- data/core/DEPENDENCIES.rust.tsv +185 -0
- data/core/LICENSE +201 -0
- data/core/README.md +228 -0
- data/core/benches/README.md +18 -0
- data/core/benches/ops/README.md +26 -0
- data/core/benches/types/README.md +9 -0
- data/core/benches/vs_fs/README.md +35 -0
- data/core/benches/vs_s3/README.md +55 -0
- data/core/edge/README.md +3 -0
- data/core/edge/file_write_on_full_disk/README.md +14 -0
- data/core/edge/s3_aws_assume_role_with_web_identity/README.md +18 -0
- data/core/edge/s3_read_on_wasm/.gitignore +3 -0
- data/core/edge/s3_read_on_wasm/README.md +42 -0
- data/core/edge/s3_read_on_wasm/webdriver.json +15 -0
- data/core/examples/README.md +23 -0
- data/core/examples/basic/README.md +15 -0
- data/core/examples/concurrent-upload/README.md +15 -0
- data/core/examples/multipart-upload/README.md +15 -0
- data/core/fuzz/.gitignore +5 -0
- data/core/fuzz/README.md +68 -0
- data/core/src/docs/comparisons/vs_object_store.md +183 -0
- data/core/src/docs/performance/concurrent_write.md +101 -0
- data/core/src/docs/performance/http_optimization.md +124 -0
- data/core/src/docs/rfcs/0000_example.md +74 -0
- data/core/src/docs/rfcs/0000_foyer_integration.md +111 -0
- data/core/src/docs/rfcs/0041_object_native_api.md +185 -0
- data/core/src/docs/rfcs/0044_error_handle.md +198 -0
- data/core/src/docs/rfcs/0057_auto_region.md +160 -0
- data/core/src/docs/rfcs/0069_object_stream.md +145 -0
- data/core/src/docs/rfcs/0090_limited_reader.md +155 -0
- data/core/src/docs/rfcs/0112_path_normalization.md +79 -0
- data/core/src/docs/rfcs/0191_async_streaming_io.md +328 -0
- data/core/src/docs/rfcs/0203_remove_credential.md +96 -0
- data/core/src/docs/rfcs/0221_create_dir.md +89 -0
- data/core/src/docs/rfcs/0247_retryable_error.md +87 -0
- data/core/src/docs/rfcs/0293_object_id.md +67 -0
- data/core/src/docs/rfcs/0337_dir_entry.md +191 -0
- data/core/src/docs/rfcs/0409_accessor_capabilities.md +67 -0
- data/core/src/docs/rfcs/0413_presign.md +154 -0
- data/core/src/docs/rfcs/0423_command_line_interface.md +268 -0
- data/core/src/docs/rfcs/0429_init_from_iter.md +107 -0
- data/core/src/docs/rfcs/0438_multipart.md +163 -0
- data/core/src/docs/rfcs/0443_gateway.md +73 -0
- data/core/src/docs/rfcs/0501_new_builder.md +111 -0
- data/core/src/docs/rfcs/0554_write_refactor.md +96 -0
- data/core/src/docs/rfcs/0561_list_metadata_reuse.md +210 -0
- data/core/src/docs/rfcs/0599_blocking_api.md +157 -0
- data/core/src/docs/rfcs/0623_redis_service.md +300 -0
- data/core/src/docs/rfcs/0627_split_capabilities.md +89 -0
- data/core/src/docs/rfcs/0661_path_in_accessor.md +126 -0
- data/core/src/docs/rfcs/0793_generic_kv_services.md +209 -0
- data/core/src/docs/rfcs/0926_object_reader.md +93 -0
- data/core/src/docs/rfcs/0977_refactor_error.md +151 -0
- data/core/src/docs/rfcs/1085_object_handler.md +73 -0
- data/core/src/docs/rfcs/1391_object_metadataer.md +110 -0
- data/core/src/docs/rfcs/1398_query_based_metadata.md +125 -0
- data/core/src/docs/rfcs/1420_object_writer.md +147 -0
- data/core/src/docs/rfcs/1477_remove_object_concept.md +159 -0
- data/core/src/docs/rfcs/1735_operation_extension.md +117 -0
- data/core/src/docs/rfcs/2083_writer_sink_api.md +106 -0
- data/core/src/docs/rfcs/2133_append_api.md +88 -0
- data/core/src/docs/rfcs/2299_chain_based_operator_api.md +99 -0
- data/core/src/docs/rfcs/2602_object_versioning.md +138 -0
- data/core/src/docs/rfcs/2758_merge_append_into_write.md +79 -0
- data/core/src/docs/rfcs/2774_lister_api.md +66 -0
- data/core/src/docs/rfcs/2779_list_with_metakey.md +143 -0
- data/core/src/docs/rfcs/2852_native_capability.md +58 -0
- data/core/src/docs/rfcs/2884_merge_range_read_into_read.md +80 -0
- data/core/src/docs/rfcs/3017_remove_write_copy_from.md +94 -0
- data/core/src/docs/rfcs/3197_config.md +237 -0
- data/core/src/docs/rfcs/3232_align_list_api.md +69 -0
- data/core/src/docs/rfcs/3243_list_prefix.md +128 -0
- data/core/src/docs/rfcs/3356_lazy_reader.md +111 -0
- data/core/src/docs/rfcs/3526_list_recursive.md +59 -0
- data/core/src/docs/rfcs/3574_concurrent_stat_in_list.md +80 -0
- data/core/src/docs/rfcs/3734_buffered_reader.md +64 -0
- data/core/src/docs/rfcs/3898_concurrent_writer.md +66 -0
- data/core/src/docs/rfcs/3911_deleter_api.md +165 -0
- data/core/src/docs/rfcs/4382_range_based_read.md +213 -0
- data/core/src/docs/rfcs/4638_executor.md +215 -0
- data/core/src/docs/rfcs/5314_remove_metakey.md +120 -0
- data/core/src/docs/rfcs/5444_operator_from_uri.md +162 -0
- data/core/src/docs/rfcs/5479_context.md +140 -0
- data/core/src/docs/rfcs/5485_conditional_reader.md +112 -0
- data/core/src/docs/rfcs/5495_list_with_deleted.md +81 -0
- data/core/src/docs/rfcs/5556_write_returns_metadata.md +121 -0
- data/core/src/docs/rfcs/5871_read_returns_metadata.md +112 -0
- data/core/src/docs/rfcs/6189_remove_native_blocking.md +106 -0
- data/core/src/docs/rfcs/6209_glob_support.md +132 -0
- data/core/src/docs/rfcs/6213_options_api.md +142 -0
- data/core/src/docs/rfcs/README.md +62 -0
- data/core/src/docs/upgrade.md +1556 -0
- data/core/src/services/aliyun_drive/docs.md +61 -0
- data/core/src/services/alluxio/docs.md +45 -0
- data/core/src/services/azblob/docs.md +77 -0
- data/core/src/services/azdls/docs.md +73 -0
- data/core/src/services/azfile/docs.md +65 -0
- data/core/src/services/b2/docs.md +54 -0
- data/core/src/services/cacache/docs.md +38 -0
- data/core/src/services/cloudflare_kv/docs.md +21 -0
- data/core/src/services/cos/docs.md +55 -0
- data/core/src/services/d1/docs.md +48 -0
- data/core/src/services/dashmap/docs.md +38 -0
- data/core/src/services/dbfs/docs.md +57 -0
- data/core/src/services/dropbox/docs.md +64 -0
- data/core/src/services/etcd/docs.md +45 -0
- data/core/src/services/foundationdb/docs.md +42 -0
- data/core/src/services/fs/docs.md +49 -0
- data/core/src/services/ftp/docs.md +42 -0
- data/core/src/services/gcs/docs.md +76 -0
- data/core/src/services/gdrive/docs.md +65 -0
- data/core/src/services/ghac/docs.md +84 -0
- data/core/src/services/github/docs.md +52 -0
- data/core/src/services/gridfs/docs.md +46 -0
- data/core/src/services/hdfs/docs.md +140 -0
- data/core/src/services/hdfs_native/docs.md +35 -0
- data/core/src/services/http/docs.md +45 -0
- data/core/src/services/huggingface/docs.md +61 -0
- data/core/src/services/ipfs/docs.md +45 -0
- data/core/src/services/ipmfs/docs.md +14 -0
- data/core/src/services/koofr/docs.md +51 -0
- data/core/src/services/lakefs/docs.md +62 -0
- data/core/src/services/memcached/docs.md +47 -0
- data/core/src/services/memory/docs.md +36 -0
- data/core/src/services/mini_moka/docs.md +19 -0
- data/core/src/services/moka/docs.md +42 -0
- data/core/src/services/mongodb/docs.md +49 -0
- data/core/src/services/monoiofs/docs.md +46 -0
- data/core/src/services/mysql/docs.md +47 -0
- data/core/src/services/obs/docs.md +54 -0
- data/core/src/services/onedrive/docs.md +115 -0
- data/core/src/services/opfs/docs.md +18 -0
- data/core/src/services/oss/docs.md +74 -0
- data/core/src/services/pcloud/docs.md +51 -0
- data/core/src/services/persy/docs.md +43 -0
- data/core/src/services/postgresql/docs.md +47 -0
- data/core/src/services/redb/docs.md +41 -0
- data/core/src/services/redis/docs.md +43 -0
- data/core/src/services/rocksdb/docs.md +54 -0
- data/core/src/services/s3/compatible_services.md +126 -0
- data/core/src/services/s3/docs.md +244 -0
- data/core/src/services/seafile/docs.md +54 -0
- data/core/src/services/sftp/docs.md +49 -0
- data/core/src/services/sled/docs.md +39 -0
- data/core/src/services/sqlite/docs.md +46 -0
- data/core/src/services/surrealdb/docs.md +54 -0
- data/core/src/services/swift/compatible_services.md +53 -0
- data/core/src/services/swift/docs.md +52 -0
- data/core/src/services/tikv/docs.md +43 -0
- data/core/src/services/upyun/docs.md +51 -0
- data/core/src/services/vercel_artifacts/docs.md +40 -0
- data/core/src/services/vercel_blob/docs.md +45 -0
- data/core/src/services/webdav/docs.md +49 -0
- data/core/src/services/webhdfs/docs.md +90 -0
- data/core/src/services/yandex_disk/docs.md +45 -0
- data/core/tests/behavior/README.md +77 -0
- data/core/tests/data/normal_dir/.gitkeep +0 -0
- data/core/tests/data/normal_file.txt +1041 -0
- data/core/tests/data/special_dir !@#$%^&()_+-=;',/.gitkeep +0 -0
- data/core/tests/data/special_file !@#$%^&()_+-=;',.txt +1041 -0
- data/core/users.md +13 -0
- data/extconf.rb +24 -0
- data/lib/opendal.rb +25 -0
- data/lib/opendal_ruby/entry.rb +35 -0
- data/lib/opendal_ruby/io.rb +70 -0
- data/lib/opendal_ruby/metadata.rb +44 -0
- data/lib/opendal_ruby/opendal_ruby.bundle +0 -0
- data/lib/opendal_ruby/operator.rb +29 -0
- data/lib/opendal_ruby/operator_info.rb +26 -0
- data/opendal.gemspec +91 -0
- data/test/blocking_op_test.rb +112 -0
- data/test/capability_test.rb +42 -0
- data/test/io_test.rb +172 -0
- data/test/lister_test.rb +77 -0
- data/test/metadata_test.rb +78 -0
- data/test/middlewares_test.rb +46 -0
- data/test/operator_info_test.rb +35 -0
- data/test/test_helper.rb +36 -0
- metadata +240 -0
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [x] copy
|
|
11
|
+
- [x] rename
|
|
12
|
+
- [x] list
|
|
13
|
+
- [ ] ~~presign~~
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `root`: Set the working directory of `OpenDAL`
|
|
19
|
+
- `endpoints`: Set the network address of etcd servers
|
|
20
|
+
- `username`: Set the username of Etcd
|
|
21
|
+
- `password`: Set the password for authentication
|
|
22
|
+
- `ca_path`: Set the ca path to the etcd connection
|
|
23
|
+
- `cert_path`: Set the cert path to the etcd connection
|
|
24
|
+
- `key_path`: Set the key path to the etcd connection
|
|
25
|
+
|
|
26
|
+
You can refer to [`EtcdBuilder`]'s docs for more information
|
|
27
|
+
|
|
28
|
+
## Example
|
|
29
|
+
|
|
30
|
+
### Via Builder
|
|
31
|
+
|
|
32
|
+
```rust,no_run
|
|
33
|
+
use anyhow::Result;
|
|
34
|
+
use opendal::services::Etcd;
|
|
35
|
+
use opendal::Operator;
|
|
36
|
+
|
|
37
|
+
#[tokio::main]
|
|
38
|
+
async fn main() -> Result<()> {
|
|
39
|
+
let mut builder = Etcd::default();
|
|
40
|
+
|
|
41
|
+
// this will build a Operator accessing etcd which runs on http://127.0.0.1:2379
|
|
42
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
43
|
+
Ok(())
|
|
44
|
+
}
|
|
45
|
+
```
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [x] copy
|
|
11
|
+
- [x] rename
|
|
12
|
+
- [ ] ~~list~~
|
|
13
|
+
- [ ] ~~presign~~
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
**Note**: As noted in [Known Limitations - FoundationDB](https://apple.github.io/foundationdb/known-limitations), keys cannot exceed 10,000 bytes in size, and values cannot exceed 100,000 bytes in size. Errors will be raised by OpenDAL if these limits are exceeded.
|
|
17
|
+
|
|
18
|
+
## Configuration
|
|
19
|
+
|
|
20
|
+
- `root`: Set the work directory for this backend.
|
|
21
|
+
- `config_path`: Set the configuration path for foundationdb. If not provided, the default configuration path will be used.
|
|
22
|
+
|
|
23
|
+
You can refer to [`FoundationdbBuilder`]'s docs for more information
|
|
24
|
+
|
|
25
|
+
## Example
|
|
26
|
+
|
|
27
|
+
### Via Builder
|
|
28
|
+
|
|
29
|
+
```rust,no_run
|
|
30
|
+
use anyhow::Result;
|
|
31
|
+
use opendal::services::Foundationdb;
|
|
32
|
+
use opendal::Operator;
|
|
33
|
+
|
|
34
|
+
#[tokio::main]
|
|
35
|
+
async fn main() -> Result<()> {
|
|
36
|
+
let mut builder = Foundationdb::default()
|
|
37
|
+
.config_path("/etc/foundationdb/foundationdb.conf");
|
|
38
|
+
|
|
39
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
40
|
+
Ok(())
|
|
41
|
+
}
|
|
42
|
+
```
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] append
|
|
9
|
+
- [x] create_dir
|
|
10
|
+
- [x] delete
|
|
11
|
+
- [x] copy
|
|
12
|
+
- [x] rename
|
|
13
|
+
- [x] list
|
|
14
|
+
- [ ] ~~presign~~
|
|
15
|
+
- [x] blocking
|
|
16
|
+
|
|
17
|
+
## Configuration
|
|
18
|
+
|
|
19
|
+
- `root`: Set the work dir for backend.
|
|
20
|
+
-
|
|
21
|
+
You can refer to [`FsBuilder`]'s docs for more information
|
|
22
|
+
|
|
23
|
+
## Example
|
|
24
|
+
|
|
25
|
+
### Via Builder
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
```rust,no_run
|
|
29
|
+
use std::sync::Arc;
|
|
30
|
+
|
|
31
|
+
use anyhow::Result;
|
|
32
|
+
use opendal::services::Fs;
|
|
33
|
+
use opendal::Operator;
|
|
34
|
+
|
|
35
|
+
#[tokio::main]
|
|
36
|
+
async fn main() -> Result<()> {
|
|
37
|
+
// Create fs backend builder.
|
|
38
|
+
let mut builder = Fs::default()
|
|
39
|
+
// Set the root for fs, all operations will happen under this root.
|
|
40
|
+
//
|
|
41
|
+
// NOTE: the root must be absolute path.
|
|
42
|
+
.root("/tmp");
|
|
43
|
+
|
|
44
|
+
// `Accessor` provides the low level APIs, we will use `Operator` normally.
|
|
45
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
46
|
+
|
|
47
|
+
Ok(())
|
|
48
|
+
}
|
|
49
|
+
```
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [ ] copy
|
|
11
|
+
- [ ] rename
|
|
12
|
+
- [x] list
|
|
13
|
+
- [ ] ~~presign~~
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `endpoint`: Set the endpoint for connection
|
|
19
|
+
- `root`: Set the work directory for backend
|
|
20
|
+
- `user`: Set the login user
|
|
21
|
+
- `password`: Set the login password
|
|
22
|
+
|
|
23
|
+
You can refer to [`FtpBuilder`]'s docs for more information
|
|
24
|
+
|
|
25
|
+
## Example
|
|
26
|
+
|
|
27
|
+
### Via Builder
|
|
28
|
+
|
|
29
|
+
```rust,no_run
|
|
30
|
+
use anyhow::Result;
|
|
31
|
+
use opendal::services::Ftp;
|
|
32
|
+
use opendal::Operator;
|
|
33
|
+
|
|
34
|
+
#[tokio::main]
|
|
35
|
+
async fn main() -> Result<()> {
|
|
36
|
+
let mut builder = Ftp::default()
|
|
37
|
+
.endpoint("127.0.0.1");
|
|
38
|
+
|
|
39
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
40
|
+
Ok(())
|
|
41
|
+
}
|
|
42
|
+
```
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [x] copy
|
|
11
|
+
- [ ] rename
|
|
12
|
+
- [x] list
|
|
13
|
+
- [x] presign
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `root`: Set the work directory for backend
|
|
19
|
+
- `bucket`: Set the bucket name for backend
|
|
20
|
+
- `endpoint`: Customizable endpoint setting
|
|
21
|
+
- `credential`: Service Account or External Account JSON, in base64
|
|
22
|
+
- `credential_path`: local path to Service Account or External Account JSON file
|
|
23
|
+
- `service_account`: name of Service Account
|
|
24
|
+
- `predefined_acl`: Predefined ACL for GCS
|
|
25
|
+
- `default_storage_class`: Default storage class for GCS
|
|
26
|
+
|
|
27
|
+
Refer to public API docs for more information. For authentication related options, read on.
|
|
28
|
+
|
|
29
|
+
## Options to authenticate to GCS
|
|
30
|
+
|
|
31
|
+
OpenDAL supports the following authentication options:
|
|
32
|
+
|
|
33
|
+
1. Provide a base64-ed JSON key string with `credential`
|
|
34
|
+
2. Provide a JSON key file at explicit path with `credential_path`
|
|
35
|
+
3. Provide a JSON key file at implicit path
|
|
36
|
+
- `GcsBackend` will attempt to load Service Account key from [ADC well-known places](https://cloud.google.com/docs/authentication/application-default-credentials).
|
|
37
|
+
4. Fetch access token from [VM metadata](https://cloud.google.com/docs/authentication/rest#metadata-server)
|
|
38
|
+
- Only works when running inside Google Cloud.
|
|
39
|
+
- If a non-default Service Account name is required, set with `service_account`. Otherwise, nothing needs to be set.
|
|
40
|
+
5. A custom `TokenLoader` via `GcsBuilder.customized_token_loader()`
|
|
41
|
+
|
|
42
|
+
Notes:
|
|
43
|
+
|
|
44
|
+
- When a Service Account key is provided, it will be used to create access tokens (VM metadata will not be used).
|
|
45
|
+
- Explicit Service Account key, in JSON or path, always takes precedence over ADC-defined key paths.
|
|
46
|
+
- Due to [limitation in GCS](https://cloud.google.com/storage/docs/authentication/signatures#signing-process), a private key is required to create Pre-signed URL. Currently, OpenDAL only supports Service Account key.
|
|
47
|
+
|
|
48
|
+
## Example
|
|
49
|
+
|
|
50
|
+
### Via Builder
|
|
51
|
+
|
|
52
|
+
```rust,no_run
|
|
53
|
+
use anyhow::Result;
|
|
54
|
+
use opendal::services::Gcs;
|
|
55
|
+
use opendal::Operator;
|
|
56
|
+
|
|
57
|
+
#[tokio::main]
|
|
58
|
+
async fn main() -> Result<()> {
|
|
59
|
+
// create backend builder
|
|
60
|
+
let mut builder = Gcs::default()
|
|
61
|
+
// set the storage bucket for OpenDAL
|
|
62
|
+
.bucket("test")
|
|
63
|
+
// set the working directory root for GCS
|
|
64
|
+
// all operations will happen within it
|
|
65
|
+
.root("/path/to/dir")
|
|
66
|
+
// set the credentials with service account
|
|
67
|
+
.credential("service account JSON in base64")
|
|
68
|
+
// set the predefined ACL for GCS
|
|
69
|
+
.predefined_acl("publicRead")
|
|
70
|
+
// set the default storage class for GCS
|
|
71
|
+
.default_storage_class("STANDARD");
|
|
72
|
+
|
|
73
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
74
|
+
Ok(())
|
|
75
|
+
}
|
|
76
|
+
```
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] delete
|
|
9
|
+
- [x] create_dir
|
|
10
|
+
- [x] list
|
|
11
|
+
- [x] copy
|
|
12
|
+
- [x] rename
|
|
13
|
+
- [ ] batch
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `root`: Set the work directory for backend
|
|
19
|
+
|
|
20
|
+
### Credentials related
|
|
21
|
+
|
|
22
|
+
#### Just provide Access Token (Temporary)
|
|
23
|
+
|
|
24
|
+
- `access_token`: set the access_token for google drive api
|
|
25
|
+
Please notice its expiration.
|
|
26
|
+
|
|
27
|
+
#### Or provide Client ID and Client Secret and refresh token (Long Term)
|
|
28
|
+
|
|
29
|
+
If you want to let OpenDAL refresh the access token automatically,
|
|
30
|
+
please provide the following fields:
|
|
31
|
+
|
|
32
|
+
- `refresh_token`: set the refresh_token for google drive api
|
|
33
|
+
- `client_id`: set the client_id for google drive api
|
|
34
|
+
- `client_secret`: set the client_secret for google drive api
|
|
35
|
+
|
|
36
|
+
OpenDAL is a library, it cannot do the first step of OAuth2 for you.
|
|
37
|
+
You need to get authorization code from user by calling GoogleDrive's authorize url
|
|
38
|
+
and exchange it for refresh token.
|
|
39
|
+
|
|
40
|
+
Make sure you have enabled Google Drive API in your Google Cloud Console.
|
|
41
|
+
And your OAuth scope contains `https://www.googleapis.com/auth/drive`.
|
|
42
|
+
|
|
43
|
+
Please refer to [GoogleDrive OAuth2 Flow](https://developers.google.com/identity/protocols/oauth2/)
|
|
44
|
+
for more information.
|
|
45
|
+
|
|
46
|
+
You can refer to [`GdriveBuilder`]'s docs for more information
|
|
47
|
+
|
|
48
|
+
## Example
|
|
49
|
+
|
|
50
|
+
### Via Builder
|
|
51
|
+
|
|
52
|
+
```rust,no_run
|
|
53
|
+
use anyhow::Result;
|
|
54
|
+
use opendal::services::Gdrive;
|
|
55
|
+
use opendal::Operator;
|
|
56
|
+
|
|
57
|
+
#[tokio::main]
|
|
58
|
+
async fn main() -> Result<()> {
|
|
59
|
+
let mut builder = Gdrive::default()
|
|
60
|
+
.root("/test")
|
|
61
|
+
.access_token("<token>");
|
|
62
|
+
|
|
63
|
+
Ok(())
|
|
64
|
+
}
|
|
65
|
+
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [x] copy
|
|
11
|
+
- [ ] rename
|
|
12
|
+
- [ ] list
|
|
13
|
+
- [ ] presign
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Notes
|
|
17
|
+
|
|
18
|
+
This service is mainly provided by GitHub actions.
|
|
19
|
+
|
|
20
|
+
Refer to [Caching dependencies to speed up workflows](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) for more information.
|
|
21
|
+
|
|
22
|
+
To make this service work as expected, please make sure to either call `endpoint` and `token` to
|
|
23
|
+
configure the URL and credentials, or that the following environment has been setup correctly:
|
|
24
|
+
|
|
25
|
+
- `ACTIONS_CACHE_URL`
|
|
26
|
+
- `ACTIONS_RUNTIME_TOKEN`
|
|
27
|
+
|
|
28
|
+
They can be exposed by following action:
|
|
29
|
+
|
|
30
|
+
```yaml
|
|
31
|
+
- name: Configure Cache Env
|
|
32
|
+
uses: actions/github-script@v6
|
|
33
|
+
with:
|
|
34
|
+
script: |
|
|
35
|
+
core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
|
|
36
|
+
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
To make `delete` work as expected, `GITHUB_TOKEN` should also be set via:
|
|
40
|
+
|
|
41
|
+
```yaml
|
|
42
|
+
env:
|
|
43
|
+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Limitations
|
|
47
|
+
|
|
48
|
+
Unlike other services, ghac doesn't support creating empty files.
|
|
49
|
+
We provide an `enable_create_simulation()` to support this operation, but it may result in unexpected side effects.
|
|
50
|
+
|
|
51
|
+
Also, `ghac` is a cache service, which means the data stored inside could
|
|
52
|
+
be automatically evicted at any time.
|
|
53
|
+
|
|
54
|
+
## Configuration
|
|
55
|
+
|
|
56
|
+
- `root`: Set the work dir for backend.
|
|
57
|
+
|
|
58
|
+
Refer to [`GhacBuilder`]'s public API docs for more information.
|
|
59
|
+
|
|
60
|
+
## Example
|
|
61
|
+
|
|
62
|
+
### Via Builder
|
|
63
|
+
|
|
64
|
+
```no_run
|
|
65
|
+
use std::sync::Arc;
|
|
66
|
+
|
|
67
|
+
use anyhow::Result;
|
|
68
|
+
use opendal::services::Ghac;
|
|
69
|
+
use opendal::Operator;
|
|
70
|
+
|
|
71
|
+
#[tokio::main]
|
|
72
|
+
async fn main() -> Result<()> {
|
|
73
|
+
// Create ghac backend builder.
|
|
74
|
+
let mut builder = Ghac::default()
|
|
75
|
+
// Set the root for ghac, all operations will happen under this root.
|
|
76
|
+
//
|
|
77
|
+
// NOTE: the root must be absolute path.
|
|
78
|
+
.root("/path/to/dir");
|
|
79
|
+
|
|
80
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
81
|
+
|
|
82
|
+
Ok(())
|
|
83
|
+
}
|
|
84
|
+
```
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [ ] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [ ] copy
|
|
11
|
+
- [ ] rename
|
|
12
|
+
- [x] list
|
|
13
|
+
- [ ] presign
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `root`: Set the work directory for backend
|
|
19
|
+
- `token`: Github access token
|
|
20
|
+
- `owner`: Github owner
|
|
21
|
+
- `repo`: Github repository
|
|
22
|
+
|
|
23
|
+
You can refer to [`GithubBuilder`]'s docs for more information
|
|
24
|
+
|
|
25
|
+
## Example
|
|
26
|
+
|
|
27
|
+
### Via Builder
|
|
28
|
+
|
|
29
|
+
```rust,no_run
|
|
30
|
+
use anyhow::Result;
|
|
31
|
+
use opendal::services::Github;
|
|
32
|
+
use opendal::Operator;
|
|
33
|
+
|
|
34
|
+
#[tokio::main]
|
|
35
|
+
async fn main() -> Result<()> {
|
|
36
|
+
// create backend builder
|
|
37
|
+
let mut builder = Github::default()
|
|
38
|
+
// set the storage root for OpenDAL
|
|
39
|
+
.root("/")
|
|
40
|
+
// set the access token for Github API
|
|
41
|
+
.token("your_access_token")
|
|
42
|
+
// set the owner for Github
|
|
43
|
+
.owner("your_owner")
|
|
44
|
+
// set the repository for Github
|
|
45
|
+
.repo("your_repo");
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
49
|
+
|
|
50
|
+
Ok(())
|
|
51
|
+
}
|
|
52
|
+
```
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
## Capabilities
|
|
2
|
+
|
|
3
|
+
This service can be used to:
|
|
4
|
+
|
|
5
|
+
- [x] stat
|
|
6
|
+
- [x] read
|
|
7
|
+
- [x] write
|
|
8
|
+
- [x] create_dir
|
|
9
|
+
- [x] delete
|
|
10
|
+
- [ ] copy
|
|
11
|
+
- [ ] rename
|
|
12
|
+
- [ ] ~~list~~
|
|
13
|
+
- [ ] ~~presign~~
|
|
14
|
+
- [ ] blocking
|
|
15
|
+
|
|
16
|
+
## Configuration
|
|
17
|
+
|
|
18
|
+
- `root`: Set the working directory of `OpenDAL`
|
|
19
|
+
- `connection_string`: Set the connection string of mongodb server
|
|
20
|
+
- `database`: Set the database of mongodb
|
|
21
|
+
- `bucket`: Set the bucket of mongodb gridfs
|
|
22
|
+
- `chunk_size`: Set the chunk size of mongodb gridfs
|
|
23
|
+
|
|
24
|
+
## Example
|
|
25
|
+
|
|
26
|
+
### Via Builder
|
|
27
|
+
|
|
28
|
+
```rust,no_run
|
|
29
|
+
use anyhow::Result;
|
|
30
|
+
use opendal::services::Gridfs;
|
|
31
|
+
use opendal::Operator;
|
|
32
|
+
|
|
33
|
+
#[tokio::main]
|
|
34
|
+
async fn main() -> Result<()> {
|
|
35
|
+
let mut builder = Gridfs::default()
|
|
36
|
+
.root("/")
|
|
37
|
+
.connection_string("mongodb://myUser:myPassword@localhost:27017/myAuthDB")
|
|
38
|
+
.database("your_database")
|
|
39
|
+
.bucket("your_bucket")
|
|
40
|
+
// The chunk size in bytes used to break the user file into chunks.
|
|
41
|
+
.chunk_size(255);
|
|
42
|
+
|
|
43
|
+
let op = Operator::new(builder)?.finish();
|
|
44
|
+
Ok(())
|
|
45
|
+
}
|
|
46
|
+
```
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
A distributed file system that provides high-throughput access to application data.
|
|
2
|
+
|
|
3
|
+
## Capabilities
|
|
4
|
+
|
|
5
|
+
This service can be used to:
|
|
6
|
+
|
|
7
|
+
- [x] stat
|
|
8
|
+
- [x] read
|
|
9
|
+
- [x] write
|
|
10
|
+
- [x] create_dir
|
|
11
|
+
- [x] delete
|
|
12
|
+
- [ ] copy
|
|
13
|
+
- [x] rename
|
|
14
|
+
- [x] list
|
|
15
|
+
- [ ] ~~presign~~
|
|
16
|
+
- [x] blocking
|
|
17
|
+
- [x] append
|
|
18
|
+
|
|
19
|
+
## Differences with webhdfs
|
|
20
|
+
|
|
21
|
+
[Webhdfs][crate::services::Webhdfs] is powered by hdfs's RESTful HTTP API.
|
|
22
|
+
|
|
23
|
+
## Features
|
|
24
|
+
|
|
25
|
+
HDFS support needs to enable feature `services-hdfs`.
|
|
26
|
+
|
|
27
|
+
## Configuration
|
|
28
|
+
|
|
29
|
+
- `root`: Set the work dir for backend.
|
|
30
|
+
- `name_node`: Set the name node for backend.
|
|
31
|
+
- `kerberos_ticket_cache_path`: Set the kerberos ticket cache path for backend, this should be gotten by `klist` after `kinit`
|
|
32
|
+
- `user`: Set the user for backend
|
|
33
|
+
- `enable_append`: enable the append capacity. Default is false.
|
|
34
|
+
|
|
35
|
+
Refer to [`HdfsBuilder`]'s public API docs for more information.
|
|
36
|
+
|
|
37
|
+
## Environment
|
|
38
|
+
|
|
39
|
+
HDFS needs some environment set correctly.
|
|
40
|
+
|
|
41
|
+
- `JAVA_HOME`: the path to java home, could be found via `java -XshowSettings:properties -version`
|
|
42
|
+
- `HADOOP_HOME`: the path to hadoop home, opendal relies on this env to discover hadoop jars and set `CLASSPATH` automatically.
|
|
43
|
+
|
|
44
|
+
Most of the time, setting `JAVA_HOME` and `HADOOP_HOME` is enough. But there are some edge cases:
|
|
45
|
+
|
|
46
|
+
- If meeting errors like the following:
|
|
47
|
+
|
|
48
|
+
```shell
|
|
49
|
+
error while loading shared libraries: libjvm.so: cannot open shared object file: No such file or directory
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
Java's libs are not included in the pkg-config find path; please set `LD_LIBRARY_PATH`:
|
|
53
|
+
|
|
54
|
+
```shell
|
|
55
|
+
export LD_LIBRARY_PATH=${JAVA_HOME}/lib/server:${LD_LIBRARY_PATH}
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
The path of `libjvm.so` could be different, please keep an eye on it.
|
|
59
|
+
|
|
60
|
+
- If meeting errors like the following:
|
|
61
|
+
|
|
62
|
+
```shell
|
|
63
|
+
(unable to get stack trace for java.lang.NoClassDefFoundError exception: ExceptionUtils::getStackTrace error.)
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
`CLASSPATH` is not set correctly or your hadoop installation is incorrect.
|
|
67
|
+
|
|
68
|
+
To set `CLASSPATH`:
|
|
69
|
+
```shell
|
|
70
|
+
export CLASSPATH=$(find $HADOOP_HOME -iname "*.jar" | xargs echo | tr ' ' ':'):${CLASSPATH}
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
- If HDFS has High Availability (HA) enabled with multiple available NameNodes, some configuration is required:
|
|
74
|
+
1. Obtain the entire HDFS config folder (usually located at HADOOP_HOME/etc/hadoop).
|
|
75
|
+
2. Set the environment variable HADOOP_CONF_DIR to the path of this folder.
|
|
76
|
+
```shell
|
|
77
|
+
export HADOOP_CONF_DIR=<path of the config folder>
|
|
78
|
+
```
|
|
79
|
+
3. Append the HADOOP_CONF_DIR to the `CLASSPATH`
|
|
80
|
+
```shell
|
|
81
|
+
export CLASSPATH=$HADOOP_CONF_DIR:$HADOOP_CLASSPATH:$CLASSPATH
|
|
82
|
+
```
|
|
83
|
+
4. Use the `cluster_name` specified in the `core-site.xml` file (located in the HADOOP_CONF_DIR folder) to replace namenode:port.
|
|
84
|
+
|
|
85
|
+
```ignore
|
|
86
|
+
builder.name_node("hdfs://cluster_name");
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
### macOS Specific Note
|
|
90
|
+
|
|
91
|
+
If you encounter an issue during the build process on macOS with an error message similar to:
|
|
92
|
+
|
|
93
|
+
```shell
|
|
94
|
+
ld: unknown file type in $HADOOP_HOME/lib/native/libhdfs.so.0.0.0
|
|
95
|
+
clang: error: linker command failed with exit code 1 (use -v to see invocation)
|
|
96
|
+
```
|
|
97
|
+
This error is likely because the official Hadoop build includes a libhdfs.so file for the x86-64 architecture, which is not compatible with the aarch64 architecture used by macOS.
|
|
98
|
+
|
|
99
|
+
To resolve this issue, you can add hdrs as a dependency in your Rust application's Cargo.toml file, and enable the vendored feature:
|
|
100
|
+
|
|
101
|
+
```toml
|
|
102
|
+
[dependencies]
|
|
103
|
+
hdrs = { version = "<version_number>", features = ["vendored"] }
|
|
104
|
+
```
|
|
105
|
+
Enabling the vendored feature ensures that hdrs includes the necessary libhdfs.so library built for the correct architecture.
|
|
106
|
+
|
|
107
|
+
## Example
|
|
108
|
+
|
|
109
|
+
### Via Builder
|
|
110
|
+
|
|
111
|
+
```rust,no_run
|
|
112
|
+
use std::sync::Arc;
|
|
113
|
+
|
|
114
|
+
use anyhow::Result;
|
|
115
|
+
use opendal::services::Hdfs;
|
|
116
|
+
use opendal::Operator;
|
|
117
|
+
|
|
118
|
+
#[tokio::main]
|
|
119
|
+
async fn main() -> Result<()> {
|
|
120
|
+
// Create fs backend builder.
|
|
121
|
+
let mut builder = Hdfs::default()
|
|
122
|
+
// Set the name node for hdfs.
|
|
123
|
+
// If the string starts with a protocol type such as file://, hdfs://, or gs://, this protocol type will be used.
|
|
124
|
+
.name_node("hdfs://127.0.0.1:9000")
|
|
125
|
+
// Set the root for hdfs, all operations will happen under this root.
|
|
126
|
+
//
|
|
127
|
+
// NOTE: the root must be absolute path.
|
|
128
|
+
.root("/tmp")
|
|
129
|
+
|
|
130
|
+
// Enable the append capacity for hdfs.
|
|
131
|
+
//
|
|
132
|
+
// Note: HDFS run in non-distributed mode doesn't support append.
|
|
133
|
+
.enable_append(true);
|
|
134
|
+
|
|
135
|
+
// `Accessor` provides the low level APIs, we will use `Operator` normally.
|
|
136
|
+
let op: Operator = Operator::new(builder)?.finish();
|
|
137
|
+
|
|
138
|
+
Ok(())
|
|
139
|
+
}
|
|
140
|
+
```
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
A distributed file system that provides high-throughput access to application data.
|
|
2
|
+
Using [Native Rust HDFS client](https://github.com/Kimahriman/hdfs-native).
|
|
3
|
+
|
|
4
|
+
## Capabilities
|
|
5
|
+
|
|
6
|
+
This service can be used to:
|
|
7
|
+
|
|
8
|
+
- [x] stat
|
|
9
|
+
- [x] read
|
|
10
|
+
- [x] write
|
|
11
|
+
- [x] create_dir
|
|
12
|
+
- [x] delete
|
|
13
|
+
- [x] rename
|
|
14
|
+
- [x] list
|
|
15
|
+
- [x] blocking
|
|
16
|
+
- [x] append
|
|
17
|
+
|
|
18
|
+
## Differences with webhdfs
|
|
19
|
+
|
|
20
|
+
[Webhdfs][crate::services::Webhdfs] is powered by hdfs's RESTful HTTP API.
|
|
21
|
+
|
|
22
|
+
## Differences with hdfs
|
|
23
|
+
|
|
24
|
+
[hdfs][crate::services::Hdfs] is powered by libhdfs and require the Java dependencies
|
|
25
|
+
|
|
26
|
+
## Features
|
|
27
|
+
|
|
28
|
+
HDFS-native support needs to enable feature `services-hdfs-native`.
|
|
29
|
+
|
|
30
|
+
## Configuration
|
|
31
|
+
|
|
32
|
+
- `root`: Set the work dir for backend.
|
|
33
|
+
- `name_node`: Set the name node for backend.
|
|
34
|
+
- `enable_append`: enable the append capacity. Default is false.
|
|
35
|
+
|