tosnativeclient 1.0.4.tar.gz → 1.0.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tosnativeclient might be problematic.
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/Cargo.lock +1 -1
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/Cargo.toml +1 -1
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/PKG-INFO +1 -1
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/python/tosnativeclient/tosnativeclient.pyi +8 -1
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/list_stream.rs +37 -12
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/read_stream.rs +14 -19
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/tos_client.rs +5 -1
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/tos_raw_client.rs +4 -4
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/write_stream.rs +28 -16
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/.github/workflows/CI.yml +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/.gitignore +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/build.sh +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/pyproject.toml +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/python/tosnativeclient/__init__.py +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/lib.rs +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/tos_error.rs +0 -0
- {tosnativeclient-1.0.4 → tosnativeclient-1.0.5}/src/tos_model.rs +0 -0
python/tosnativeclient/tosnativeclient.pyi

@@ -31,6 +31,8 @@ class ListStream(object):
     prefix: str
     delimiter: str
     max_keys: int
+    continuation_token: str
+    start_after: str
 
     def __iter__(self) -> ListStream: ...
 
@@ -38,6 +40,10 @@ class ListStream(object):
 
     def close(self) -> None: ...
 
+    def current_prefix(self) -> Optional[str]: ...
+
+    def current_continuation_token(self) -> Optional[str]: ...
+
 
 class ReadStream(object):
     bucket: str
@@ -82,7 +88,8 @@ class TosClient(object):
                  file_name_prefix: str = '', shared_prefetch_tasks: int = 20):
         ...
 
-    def list_objects(self, bucket: str, prefix: str = '', max_keys: int = 1000, delimiter: str = '') -> ListStream:
+    def list_objects(self, bucket: str, prefix: str = '', max_keys: int = 1000, delimiter: str = '',
+                     continuation_token: str = '', start_after: str = '') -> ListStream:
         ...
 
     def head_object(self, bucket: str, key: str) -> TosObject:
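Taken together, the stub changes extend list_objects with server-side pagination controls and give ListStream two read-only accessors for its current position. A minimal usage sketch, assuming `client` is an already-constructed tosnativeclient.TosClient; the bucket name, key prefix, and result handling are illustrative, and the Rust implementation below only forwards continuation_token / start_after when they are non-empty:

    # Hedged sketch: listing with the parameters added in 1.0.5.
    # `client`, "my-bucket" and the way results are consumed are assumptions.
    stream = client.list_objects(
        "my-bucket",
        prefix="logs/",
        max_keys=1000,
        delimiter="/",
        start_after="logs/2024-01-01",     # listing starts after this key
    )
    try:
        for item in stream:                # ListStream is iterable per the stub
            pass                           # items' exact shape is not shown in this diff
    finally:
        stream.close()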
src/list_stream.rs

@@ -27,6 +27,10 @@ pub struct ListStream {
     delimiter: String,
     #[pyo3(get)]
     max_keys: isize,
+    #[pyo3(get)]
+    continuation_token: String,
+    #[pyo3(get)]
+    start_after: String,
 }
 
 #[pymethods]
@@ -65,15 +69,20 @@ impl ListStream {
         }
     }
 
-    pub fn
-        let
-
-
-
-
-
-
-
+    pub fn current_prefix(&self) -> PyResult<Option<String>> {
+        let pg = self.paginator.read().unwrap();
+        match pg.as_ref() {
+            None => Ok(None),
+            Some(pg) => Ok(Some(pg.current_prefix())),
+        }
+    }
+
+    pub fn current_continuation_token(&self) -> PyResult<Option<String>> {
+        let pg = self.paginator.read().unwrap();
+        match pg.as_ref() {
+            None => Ok(None),
+            Some(pg) => Ok(Some(pg.current_continuation_token())),
+        }
     }
 }
 
@@ -85,6 +94,8 @@ impl ListStream {
         prefix: String,
         delimiter: String,
         max_keys: isize,
+        continuation_token: String,
+        start_after: String,
     ) -> Self {
         Self {
             client,
@@ -95,6 +106,8 @@ impl ListStream {
             prefix,
            delimiter,
            max_keys,
+            continuation_token,
+            start_after,
         }
     }
 
@@ -105,7 +118,12 @@ impl ListStream {
         input.set_prefix(self.prefix.as_str());
         input.set_max_keys(self.max_keys);
         input.set_delimiter(self.delimiter.as_str());
-
+        if self.continuation_token != "" {
+            input.set_continuation_token(self.continuation_token.as_str());
+        }
+        if self.start_after != "" {
+            input.set_start_after(self.start_after.as_str());
+        }
         py.allow_threads(|| {
             self.runtime.spawn(async move {
                 let mut need_break = false;
@@ -169,6 +187,7 @@ impl ListStream {
             is_end: ArcSwap::new(Arc::new(false)),
             last_err: ArcSwap::new(Arc::new(None)),
             current_prefix: ArcSwap::new(Arc::new(self.prefix.clone())),
+            current_continuation_token: ArcSwap::new(Arc::new(self.continuation_token.clone())),
             receiver,
         })
     }
@@ -207,6 +226,7 @@ pub(crate) struct Paginator {
     is_end: ArcSwap<bool>,
     last_err: ArcSwap<Option<TosError>>,
     current_prefix: ArcSwap<String>,
+    current_continuation_token: ArcSwap<String>,
     receiver: Receiver<(bool, Result<ListObjectsType2Output, TosError>)>,
 }
 
@@ -218,8 +238,11 @@ impl Paginator {
         Ok(!*self.is_end.load().as_ref())
     }
 
-    fn current_prefix(&self) ->
-        self.current_prefix.load().
+    fn current_prefix(&self) -> String {
+        self.current_prefix.load().to_string()
+    }
+    fn current_continuation_token(&self) -> String {
+        self.current_continuation_token.load().to_string()
     }
     async fn next_page(&self) -> Result<ListObjectsType2Output, TosError> {
         if let Some(e) = self.last_err.load().as_ref() {
@@ -248,6 +271,8 @@ impl Paginator {
             Ok(output) => {
                 self.current_prefix
                     .store(Arc::new(output.prefix().to_string()));
+                self.current_continuation_token
+                    .store(Arc::new(output.continuation_token().to_string()));
                 if is_end {
                     self.is_end.store(Arc::new(true));
                 }
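Because the paginator now records the continuation token alongside the prefix for each page it fetches, a long listing can be checkpointed and resumed from a fresh stream. A hedged sketch of that pattern, assuming `client` is an already-constructed TosClient; the bucket, prefix, and token persistence are illustrative, and the exact page a resumed token re-fetches follows TOS ListObjectsV2 semantics rather than anything spelled out in this diff:

    # Hedged sketch: checkpoint a listing and resume it later.
    token = None
    stream = client.list_objects("my-bucket", prefix="logs/")
    for item in stream:
        token = stream.current_continuation_token()  # may be None before the first page
        ...                                          # process `item`, persist `token` durably
    stream.close()

    # Later: start a fresh stream from the saved checkpoint.
    if token:
        resumed = client.list_objects("my-bucket", prefix="logs/",
                                      continuation_token=token)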
src/read_stream.rs

@@ -66,17 +66,6 @@ impl ReadStream {
         slf.py()
             .allow_threads(|| runtime.block_on(async move { fetcher.close().await }))
     }
-
-    pub fn __getnewargs__(slf: PyRef<'_, Self>) -> PyResult<Bound<'_, PyTuple>> {
-        let py = slf.py();
-        let state = [
-            slf.bucket.clone().into_pyobject(py)?.into_any(),
-            slf.key.clone().into_pyobject(py)?.into_any(),
-            slf.etag.clone().into_pyobject(py)?.into_any(),
-            slf.size.clone().into_pyobject(py)?.into_any(),
-        ];
-        PyTuple::new(py, state)
-    }
 }
 
 impl ReadStream {
@@ -128,6 +117,7 @@ impl ReadStream {
         Self {
             object_fetcher: Arc::new(ObjectFetcher {
                 client,
+                runtime: runtime.clone(),
                 closed: Arc::new(AtomicI8::new(0)),
                 pc,
                 fc: Mutex::new(fc),
@@ -160,6 +150,7 @@ impl ReadStream {
 
 struct ObjectFetcher {
     client: Arc<InnerTosClient>,
+    runtime: Arc<Runtime>,
     closed: Arc<AtomicI8>,
     pc: PrefetchConfig,
     fc: Mutex<FetchContext>,
@@ -306,7 +297,7 @@ impl ObjectFetcher {
                 if fc.has_tasks() {
                     fc.current = fc.pop_task()
                 } else {
-                    fc.current = self.start_fetch_task(fc);
+                    fc.current = self.start_fetch_task(fc, self.runtime.clone());
                 }
             }
             Some(current) => {
@@ -315,17 +306,17 @@ impl ObjectFetcher {
                     if fc.has_tasks() {
                         fc.current = fc.pop_task()
                     } else {
-                        fc.current = self.start_fetch_task(fc);
+                        fc.current = self.start_fetch_task(fc, self.runtime.clone());
                     }
                 } else if fc.exceed_sequential_read_threshold() {
                     if fc.can_add_task(self.pc.max_prefetch_tasks) {
-                        if let Some(next) = self.start_fetch_task(fc) {
+                        if let Some(next) = self.start_fetch_task(fc, self.runtime.clone()) {
                             fc.push_task(next, false);
                         }
                     } else if fc.can_steal_shared_task(self.pc.shared_prefetch_task_limit)
                         && fc.pcontext.try_steal_shared_prefetch_task()
                     {
-                        match self.start_fetch_task(fc) {
+                        match self.start_fetch_task(fc, self.runtime.clone()) {
                             Some(mut next) => {
                                 next.is_shared = true;
                                 fc.steal_shared_task();
@@ -341,14 +332,18 @@ impl ObjectFetcher {
         }
     }
 
-    fn start_fetch_task(
+    fn start_fetch_task(
+        &self,
+        fc: &mut MutexGuard<'_, FetchContext>,
+        runtime: Arc<Runtime>,
+    ) -> Option<FetchTask> {
         if fc.next_request_offset >= self.om.size {
             return None;
         }
 
         let task = self.new_fetch_task(fc);
         self.revise_next_request_offset_and_size(fc, task.size);
-        task.async_fetch(self.client.clone());
+        task.async_fetch(self.client.clone(), runtime);
         Some(task)
     }
 
@@ -671,9 +666,9 @@ impl FetchTask {
         self.start_offset + self.fetched_size.load(Ordering::Acquire)
     }
 
-    fn async_fetch(&self, client: Arc<InnerTosClient>) {
+    fn async_fetch(&self, client: Arc<InnerTosClient>, runtime: Arc<Runtime>) {
         let fetch_task_context = FetchTaskContext::new(self, client);
-
+        runtime.spawn(async move {
             fetch_task_context.fetch_from_server().await;
             fetch_task_context.chunk_queue.close();
         });
src/tos_client.rs

@@ -163,13 +163,15 @@ impl TosClient {
         })
     }
 
-    #[pyo3(signature = (bucket, prefix=String::from(""), max_keys=1000, delimiter=String::from("")))]
+    #[pyo3(signature = (bucket, prefix=String::from(""), max_keys=1000, delimiter=String::from(""), continuation_token=String::from(""), start_after=String::from("")))]
     pub fn list_objects(
         &self,
         bucket: String,
         prefix: String,
         max_keys: isize,
         delimiter: String,
+        continuation_token: String,
+        start_after: String,
     ) -> ListStream {
         ListStream::new(
             self.rclient.clone(),
@@ -178,6 +180,8 @@ impl TosClient {
             prefix,
             delimiter,
             max_keys,
+            continuation_token,
+            start_after,
         )
     }
     pub fn head_object(slf: PyRef<'_, Self>, bucket: String, key: String) -> PyResult<TosObject> {
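Both new arguments default to empty strings in the pyo3 signature, and ListStream only forwards non-empty values into the request, so call sites written against 1.0.4 keep working unchanged. A small sketch, with the client and bucket assumed:

    # Hedged sketch: the 1.0.4-style call is still valid in 1.0.5.
    # `client` is an assumed, already-constructed tosnativeclient.TosClient.
    stream = client.list_objects("my-bucket", prefix="data/", max_keys=500)
    # Equivalent to passing continuation_token="" and start_after="";
    # empty strings are treated as "not set" on the Rust side.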
src/tos_raw_client.rs

@@ -228,10 +228,10 @@ impl TosRawClient {
             slf.endpoint.clone().into_pyobject(py)?.into_any(),
             slf.ak.clone().into_pyobject(py)?.into_any(),
             slf.sk.clone().into_pyobject(py)?.into_any(),
-            slf.connection_timeout.
-            slf.request_timeout.
-            slf.max_connections.
-            slf.max_retry_count.
+            slf.connection_timeout.into_pyobject(py)?.into_any(),
+            slf.request_timeout.into_pyobject(py)?.into_any(),
+            slf.max_connections.into_pyobject(py)?.into_any(),
+            slf.max_retry_count.into_pyobject(py)?.into_any(),
         ];
         PyTuple::new(py, state)
     }
src/write_stream.rs

@@ -72,16 +72,6 @@ impl WriteStream {
             Ok(_) => Ok(()),
         }
     }
-
-    pub fn __getnewargs__(slf: PyRef<'_, Self>) -> PyResult<Bound<'_, PyTuple>> {
-        let py = slf.py();
-        let state = [
-            slf.bucket.clone().into_pyobject(py)?.into_any(),
-            slf.key.clone().into_pyobject(py)?.into_any(),
-            slf.storage_class.clone().into_pyobject(py)?.into_any(),
-        ];
-        PyTuple::new(py, state)
-    }
 }
 
 impl WriteStream {
@@ -104,8 +94,15 @@ impl WriteStream {
         let _key = key.clone();
         let _storage_class = storage_class.clone();
 
-        let object_writer =
-
+        let object_writer = ObjectWriter::new(
+            client,
+            runtime.clone(),
+            _bucket,
+            _key,
+            _storage_class,
+            part_size,
+        )
+        .await?;
         Ok(Self {
             object_writer: Arc::new(object_writer),
             runtime,
@@ -145,6 +142,7 @@ struct ObjectWriter {
 impl ObjectWriter {
     async fn new(
         client: Arc<InnerTosClient>,
+        runtime: Arc<Runtime>,
         bucket: String,
         key: String,
         storage_class: Option<String>,
@@ -155,7 +153,15 @@ impl ObjectWriter {
             key,
             storage_class,
         });
-        let ou = ObjectUploader::new(
+        let ou = ObjectUploader::new(
+            client,
+            runtime,
+            wp,
+            part_size,
+            0,
+            Arc::new(AtomicI8::new(0)),
+        )
+        .await?;
         Ok(Self {
             ctx: Mutex::new((ou, 0)),
             closed: Arc::new(AtomicI8::new(0)),
@@ -274,6 +280,7 @@ struct WriteParam {
 struct ObjectUploader {
     next_write_offset: isize,
     uc: Arc<UploadContext>,
+    runtime: Arc<Runtime>,
     current: Option<Part>,
     part_size: isize,
     max_size: isize,
@@ -288,6 +295,7 @@ struct ObjectUploader {
 impl ObjectUploader {
     async fn new(
         client: Arc<InnerTosClient>,
+        runtime: Arc<Runtime>,
         wp: Arc<WriteParam>,
         part_size: isize,
         next_write_offset: isize,
@@ -304,6 +312,7 @@ impl ObjectUploader {
         let mut ou = Self {
             next_write_offset,
             uc: Arc::new(UploadContext::new(created, wp, client)),
+            runtime,
             current: None,
             part_size,
             max_size,
@@ -485,7 +494,7 @@ impl ObjectUploader {
         let dp = self.dp.clone();
         let st = self.st.clone();
         let uc = self.uc.clone();
-        self.wait_dispatch = Some(
+        self.wait_dispatch = Some(self.runtime.spawn(async move {
            loop {
                match dp.pull().await {
                    None => return,
@@ -510,12 +519,13 @@ impl ObjectUploader {
     }
 
     async fn execute(&mut self) {
+        let runtime = self.runtime.clone();
         let dp = self.dp.clone();
         let st = self.st.clone();
         let ta = self.ta.clone();
         let uc = self.uc.clone();
         let mu_ctx = self.mu_ctx.clone();
-        self.wait_execute = Some(
+        self.wait_execute = Some(self.runtime.spawn(async move {
            let mut wait_async_uploads = Vec::with_capacity(16);
            loop {
                match st.pull().await {
@@ -529,6 +539,7 @@ impl ObjectUploader {
                        wait_async_uploads.push(
                            uc.clone()
                                .async_upload(
+                                    runtime.clone(),
                                    dp.clone(),
                                    st.clone(),
                                    ta.clone(),
@@ -600,6 +611,7 @@ impl UploadContext {
 
     async fn async_upload(
         self: Arc<Self>,
+        runtime: Arc<Runtime>,
         dp: Arc<Dispatcher>,
         st: Arc<Store>,
         ta: Arc<TokenAcquirer>,
@@ -607,7 +619,7 @@ impl UploadContext {
         mut si: StoreItem,
     ) -> JoinHandle<()> {
         let aborted = self.aborted.clone();
-
+        runtime.spawn(async move {
            let _ = ta.acquire().await;
            if aborted.load(Ordering::Acquire) == 1 {
                si.release();