eio 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +8 -0
- data/COPYING +502 -0
- data/LICENSE +16 -0
- data/README.rdoc +201 -0
- data/Rakefile +48 -0
- data/bench/eventmachine.rb +134 -0
- data/eio.gemspec +17 -0
- data/ext/eio/eio_ext.c +1447 -0
- data/ext/eio/extconf.rb +11 -0
- data/ext/libeio/CVS/Entries +13 -0
- data/ext/libeio/CVS/Repository +1 -0
- data/ext/libeio/CVS/Root +1 -0
- data/ext/libeio/Changes +40 -0
- data/ext/libeio/LICENSE +36 -0
- data/ext/libeio/Makefile +692 -0
- data/ext/libeio/Makefile.am +15 -0
- data/ext/libeio/Makefile.in +692 -0
- data/ext/libeio/aclocal.m4 +8937 -0
- data/ext/libeio/autogen.sh +3 -0
- data/ext/libeio/autom4te.cache/output.0 +13871 -0
- data/ext/libeio/autom4te.cache/output.1 +13867 -0
- data/ext/libeio/autom4te.cache/requests +275 -0
- data/ext/libeio/autom4te.cache/traces.0 +2384 -0
- data/ext/libeio/autom4te.cache/traces.1 +621 -0
- data/ext/libeio/config.guess +1501 -0
- data/ext/libeio/config.h +122 -0
- data/ext/libeio/config.h.in +121 -0
- data/ext/libeio/config.status +2035 -0
- data/ext/libeio/config.sub +1705 -0
- data/ext/libeio/configure +13867 -0
- data/ext/libeio/configure.ac +22 -0
- data/ext/libeio/demo.c +194 -0
- data/ext/libeio/eio.3 +3428 -0
- data/ext/libeio/eio.c +2075 -0
- data/ext/libeio/eio.h +336 -0
- data/ext/libeio/eio.pod +303 -0
- data/ext/libeio/install-sh +520 -0
- data/ext/libeio/libeio.m4 +156 -0
- data/ext/libeio/libtool +8890 -0
- data/ext/libeio/ltmain.sh +8406 -0
- data/ext/libeio/missing +376 -0
- data/ext/libeio/stamp-h1 +1 -0
- data/ext/libeio/xthread.h +168 -0
- data/lib/eio.rb +9 -0
- data/lib/eio/eventmachine.rb +24 -0
- data/lib/eio/middleware.rb +21 -0
- data/test/test_eio.rb +1161 -0
- data/test/test_eventmachine.rb +23 -0
- data/test/test_middleware.rb +20 -0
- metadata +148 -0
data/ext/libeio/eio.c
ADDED
@@ -0,0 +1,2075 @@
|
|
1
|
+
/*
|
2
|
+
* libeio implementation
|
3
|
+
*
|
4
|
+
* Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de>
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without modifica-
|
8
|
+
* tion, are permitted provided that the following conditions are met:
|
9
|
+
*
|
10
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
11
|
+
* this list of conditions and the following disclaimer.
|
12
|
+
*
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
14
|
+
* notice, this list of conditions and the following disclaimer in the
|
15
|
+
* documentation and/or other materials provided with the distribution.
|
16
|
+
*
|
17
|
+
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
18
|
+
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
|
19
|
+
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
20
|
+
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
|
21
|
+
* CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
22
|
+
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
23
|
+
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
24
|
+
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
|
25
|
+
* ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
26
|
+
* OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
*
|
28
|
+
* Alternatively, the contents of this file may be used under the terms of
|
29
|
+
* the GNU General Public License ("GPL") version 2 or any later version,
|
30
|
+
* in which case the provisions of the GPL are applicable instead of
|
31
|
+
* the above. If you wish to allow the use of your version of this file
|
32
|
+
* only under the terms of the GPL and not to allow others to use your
|
33
|
+
* version of this file under the BSD license, indicate your decision
|
34
|
+
* by deleting the provisions above and replace them with the notice
|
35
|
+
* and other provisions required by the GPL. If you do not delete the
|
36
|
+
* provisions above, a recipient may use your version of this file under
|
37
|
+
* either the BSD or the GPL.
|
38
|
+
*/
|
39
|
+
|
40
|
+
#ifndef _WIN32
|
41
|
+
# include "config.h"
|
42
|
+
#endif
|
43
|
+
|
44
|
+
#include "eio.h"
|
45
|
+
|
46
|
+
#ifdef EIO_STACKSIZE
|
47
|
+
# define XTHREAD_STACKSIZE EIO_STACKSIZE
|
48
|
+
#endif
|
49
|
+
#include "xthread.h"
|
50
|
+
|
51
|
+
#include <errno.h>
|
52
|
+
#include <stddef.h>
|
53
|
+
#include <stdlib.h>
|
54
|
+
#include <string.h>
|
55
|
+
#include <errno.h>
|
56
|
+
#include <sys/types.h>
|
57
|
+
#include <sys/stat.h>
|
58
|
+
#include <sys/statvfs.h>
|
59
|
+
#include <limits.h>
|
60
|
+
#include <fcntl.h>
|
61
|
+
#include <assert.h>
|
62
|
+
|
63
|
+
#ifndef EIO_FINISH
|
64
|
+
# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0
|
65
|
+
#endif
|
66
|
+
|
67
|
+
#ifndef EIO_DESTROY
|
68
|
+
# define EIO_DESTROY(req) do { if ((req)->destroy) (req)->destroy (req); } while (0)
|
69
|
+
#endif
|
70
|
+
|
71
|
+
#ifndef EIO_FEED
|
72
|
+
# define EIO_FEED(req) do { if ((req)->feed ) (req)->feed (req); } while (0)
|
73
|
+
#endif
|
74
|
+
|
75
|
+
#ifdef _WIN32
|
76
|
+
|
77
|
+
/*doh*/
|
78
|
+
#else
|
79
|
+
|
80
|
+
# include <sys/time.h>
|
81
|
+
# include <sys/select.h>
|
82
|
+
# include <unistd.h>
|
83
|
+
# include <utime.h>
|
84
|
+
# include <signal.h>
|
85
|
+
# include <dirent.h>
|
86
|
+
|
87
|
+
#if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
|
88
|
+
# include <sys/mman.h>
|
89
|
+
#endif
|
90
|
+
|
91
|
+
/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
|
92
|
+
# if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__
|
93
|
+
# define _DIRENT_HAVE_D_TYPE /* sigh */
|
94
|
+
# define D_INO(de) (de)->d_fileno
|
95
|
+
# define D_NAMLEN(de) (de)->d_namlen
|
96
|
+
# elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
|
97
|
+
# define D_INO(de) (de)->d_ino
|
98
|
+
# endif
|
99
|
+
|
100
|
+
#ifdef _D_EXACT_NAMLEN
|
101
|
+
# undef D_NAMLEN
|
102
|
+
# define D_NAMLEN(de) _D_EXACT_NAMLEN (de)
|
103
|
+
#endif
|
104
|
+
|
105
|
+
# ifdef _DIRENT_HAVE_D_TYPE
|
106
|
+
# define D_TYPE(de) (de)->d_type
|
107
|
+
# endif
|
108
|
+
|
109
|
+
# ifndef EIO_STRUCT_DIRENT
|
110
|
+
# define EIO_STRUCT_DIRENT struct dirent
|
111
|
+
# endif
|
112
|
+
|
113
|
+
#endif
|
114
|
+
|
115
|
+
#if HAVE_SENDFILE
|
116
|
+
# if __linux
|
117
|
+
# include <sys/sendfile.h>
|
118
|
+
# elif __FreeBSD__ || defined __APPLE__
|
119
|
+
# include <sys/socket.h>
|
120
|
+
# include <sys/uio.h>
|
121
|
+
# elif __hpux
|
122
|
+
# include <sys/socket.h>
|
123
|
+
# elif __solaris
|
124
|
+
# include <sys/sendfile.h>
|
125
|
+
# else
|
126
|
+
# error sendfile support requested but not available
|
127
|
+
# endif
|
128
|
+
#endif
|
129
|
+
|
130
|
+
#ifndef D_TYPE
|
131
|
+
# define D_TYPE(de) 0
|
132
|
+
#endif
|
133
|
+
#ifndef D_INO
|
134
|
+
# define D_INO(de) 0
|
135
|
+
#endif
|
136
|
+
#ifndef D_NAMLEN
|
137
|
+
# define D_NAMLEN(de) strlen ((de)->d_name)
|
138
|
+
#endif
|
139
|
+
|
140
|
+
/* used for struct dirent, AIX doesn't provide it */
|
141
|
+
#ifndef NAME_MAX
|
142
|
+
# define NAME_MAX 4096
|
143
|
+
#endif
|
144
|
+
|
145
|
+
/* used for readlink etc. */
|
146
|
+
#ifndef PATH_MAX
|
147
|
+
# define PATH_MAX 4096
|
148
|
+
#endif
|
149
|
+
|
150
|
+
/* buffer size for various temporary buffers */
|
151
|
+
#define EIO_BUFSIZE 65536
|
152
|
+
|
153
|
+
#define dBUF \
|
154
|
+
char *eio_buf; \
|
155
|
+
ETP_WORKER_LOCK (self); \
|
156
|
+
self->dbuf = eio_buf = malloc (EIO_BUFSIZE); \
|
157
|
+
ETP_WORKER_UNLOCK (self); \
|
158
|
+
errno = ENOMEM; \
|
159
|
+
if (!eio_buf) \
|
160
|
+
return -1;
|
161
|
+
|
162
|
+
#define EIO_TICKS ((1000000 + 1023) >> 10)
|
163
|
+
|
164
|
+
/*****************************************************************************/
|
165
|
+
|
166
|
+
#if __GNUC__ >= 3
|
167
|
+
# define expect(expr,value) __builtin_expect ((expr),(value))
|
168
|
+
#else
|
169
|
+
# define expect(expr,value) (expr)
|
170
|
+
#endif
|
171
|
+
|
172
|
+
#define expect_false(expr) expect ((expr) != 0, 0)
|
173
|
+
#define expect_true(expr) expect ((expr) != 0, 1)
|
174
|
+
|
175
|
+
/*****************************************************************************/
|
176
|
+
|
177
|
+
#define ETP_PRI_MIN EIO_PRI_MIN
|
178
|
+
#define ETP_PRI_MAX EIO_PRI_MAX
|
179
|
+
|
180
|
+
struct etp_worker;
|
181
|
+
|
182
|
+
#define ETP_REQ eio_req
|
183
|
+
#define ETP_DESTROY(req) eio_destroy (req)
|
184
|
+
static int eio_finish (eio_req *req);
|
185
|
+
#define ETP_FINISH(req) eio_finish (req)
|
186
|
+
static void eio_execute (struct etp_worker *self, eio_req *req);
|
187
|
+
#define ETP_EXECUTE(wrk,req) eio_execute (wrk,req)
|
188
|
+
|
189
|
+
#define ETP_WORKER_CLEAR(req) \
|
190
|
+
if (wrk->dbuf) \
|
191
|
+
{ \
|
192
|
+
free (wrk->dbuf); \
|
193
|
+
wrk->dbuf = 0; \
|
194
|
+
} \
|
195
|
+
\
|
196
|
+
if (wrk->dirp) \
|
197
|
+
{ \
|
198
|
+
closedir (wrk->dirp); \
|
199
|
+
wrk->dirp = 0; \
|
200
|
+
}
|
201
|
+
|
202
|
+
#define ETP_WORKER_COMMON \
|
203
|
+
void *dbuf; \
|
204
|
+
DIR *dirp;
|
205
|
+
|
206
|
+
/*****************************************************************************/
|
207
|
+
|
208
|
+
#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
|
209
|
+
|
210
|
+
/* calculate time difference in ~1/EIO_TICKS of a second */
|
211
|
+
/* time difference between tv1 and tv2 in ~1/EIO_TICKS-of-a-second units;
 * seconds are scaled exactly, microseconds are divided by 1024 as a cheap
 * approximation of the tick unit */
static int tvdiff (struct timeval *tv1, struct timeval *tv2)
{
  return  (tv2->tv_sec  - tv1->tv_sec ) * EIO_TICKS
       + ((tv2->tv_usec - tv1->tv_usec) >> 10);
}
|
216
|
+
|
217
|
+
static unsigned int started, idle, wanted = 4;
|
218
|
+
|
219
|
+
static void (*want_poll_cb) (void);
|
220
|
+
static void (*done_poll_cb) (void);
|
221
|
+
|
222
|
+
static unsigned int max_poll_time; /* reslock */
|
223
|
+
static unsigned int max_poll_reqs; /* reslock */
|
224
|
+
|
225
|
+
static volatile unsigned int nreqs; /* reqlock */
|
226
|
+
static volatile unsigned int nready; /* reqlock */
|
227
|
+
static volatile unsigned int npending; /* reqlock */
|
228
|
+
static volatile unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
|
229
|
+
static volatile unsigned int idle_timeout = 10; /* number of seconds after which an idle threads exit */
|
230
|
+
|
231
|
+
static xmutex_t wrklock;
|
232
|
+
static xmutex_t reslock;
|
233
|
+
static xmutex_t reqlock;
|
234
|
+
static xcond_t reqwait;
|
235
|
+
|
236
|
+
#if !HAVE_PREADWRITE
|
237
|
+
/*
|
238
|
+
* make our pread/pwrite emulation safe against themselves, but not against
|
239
|
+
* normal read/write by using a mutex. slows down execution a lot,
|
240
|
+
* but that's your problem, not mine.
|
241
|
+
*/
|
242
|
+
static xmutex_t preadwritelock = X_MUTEX_INIT;
|
243
|
+
#endif
|
244
|
+
|
245
|
+
typedef struct etp_worker
|
246
|
+
{
|
247
|
+
/* locked by wrklock */
|
248
|
+
struct etp_worker *prev, *next;
|
249
|
+
|
250
|
+
xthread_t tid;
|
251
|
+
|
252
|
+
/* locked by reslock, reqlock or wrklock */
|
253
|
+
ETP_REQ *req; /* currently processed request */
|
254
|
+
|
255
|
+
ETP_WORKER_COMMON
|
256
|
+
} etp_worker;
|
257
|
+
|
258
|
+
static etp_worker wrk_first = { &wrk_first, &wrk_first, 0 }; /* NOT etp */
|
259
|
+
|
260
|
+
#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
|
261
|
+
#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
|
262
|
+
|
263
|
+
/* worker threads management */
|
264
|
+
|
265
|
+
/* release a worker's scratch state (temporary buffer and open DIR handle)
 * via the ETP_WORKER_CLEAR macro */
static void etp_worker_clear (etp_worker *wrk)
{
  ETP_WORKER_CLEAR (wrk);
}
|
269
|
+
|
270
|
+
/* unlink a worker record from the circular doubly-linked worker list
 * (anchored at wrk_first) and release its memory; caller holds wrklock */
static void etp_worker_free (etp_worker *wrk)
{
  etp_worker *before = wrk->prev;
  etp_worker *after  = wrk->next;

  after->prev  = before;
  before->next = after;

  free (wrk);
}
|
277
|
+
|
278
|
+
static unsigned int etp_nreqs (void)
|
279
|
+
{
|
280
|
+
int retval;
|
281
|
+
if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
|
282
|
+
retval = nreqs;
|
283
|
+
if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
|
284
|
+
return retval;
|
285
|
+
}
|
286
|
+
|
287
|
+
static unsigned int etp_nready (void)
|
288
|
+
{
|
289
|
+
unsigned int retval;
|
290
|
+
|
291
|
+
if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
|
292
|
+
retval = nready;
|
293
|
+
if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
|
294
|
+
|
295
|
+
return retval;
|
296
|
+
}
|
297
|
+
|
298
|
+
static unsigned int etp_npending (void)
|
299
|
+
{
|
300
|
+
unsigned int retval;
|
301
|
+
|
302
|
+
if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
|
303
|
+
retval = npending;
|
304
|
+
if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
|
305
|
+
|
306
|
+
return retval;
|
307
|
+
}
|
308
|
+
|
309
|
+
static unsigned int etp_nthreads (void)
|
310
|
+
{
|
311
|
+
unsigned int retval;
|
312
|
+
|
313
|
+
if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
|
314
|
+
retval = started;
|
315
|
+
if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
|
316
|
+
|
317
|
+
return retval;
|
318
|
+
}
|
319
|
+
|
320
|
+
/*
|
321
|
+
* a somewhat faster data structure might be nice, but
|
322
|
+
* with 8 priorities this actually needs <20 insns
|
323
|
+
* per shift, the most expensive operation.
|
324
|
+
*/
|
325
|
+
typedef struct {
|
326
|
+
ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
|
327
|
+
int size;
|
328
|
+
} etp_reqq;
|
329
|
+
|
330
|
+
static etp_reqq req_queue;
|
331
|
+
static etp_reqq res_queue;
|
332
|
+
|
333
|
+
/* append req to the FIFO for its priority; returns the total queue size
 * BEFORE the push, so a return of 0 means the queue was empty (used by
 * callers to trigger want_poll exactly once) */
static int reqq_push (etp_reqq *q, ETP_REQ *req)
{
  int pri = req->pri;
  req->next = 0;

  if (q->qe[pri])
    {
      /* non-empty priority list: append at the tail */
      q->qe[pri]->next = req;
      q->qe[pri] = req;
    }
  else
    q->qe[pri] = q->qs[pri] = req;

  return q->size++;
}

/* remove and return the highest-priority queued request, or 0 when the
 * queue is empty; aborts if q->size disagrees with the per-priority lists */
static ETP_REQ *reqq_shift (etp_reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  /* scan priorities from highest to lowest */
  for (pri = ETP_NUM_PRI; pri--; )
    {
      eio_req *req = q->qs[pri];

      if (req)
        {
          /* unlink the head; clear the tail pointer if the list emptied */
          if (!(q->qs[pri] = (eio_req *)req->next))
            q->qe[pri] = 0;

          return req;
        }
    }

  abort ();
}
|
373
|
+
|
374
|
+
/* (re)create the global locks and condition variable used by the pool */
static void etp_thread_init (void)
{
  X_MUTEX_CREATE (wrklock);
  X_MUTEX_CREATE (reslock);
  X_MUTEX_CREATE (reqlock);
  X_COND_CREATE  (reqwait);
}

/* pthread_atfork "prepare" hook: take every pool lock so no lock is held
 * by a thread that will not exist in the child */
static void etp_atfork_prepare (void)
{
  X_LOCK (wrklock);
  X_LOCK (reqlock);
  X_LOCK (reslock);
#if !HAVE_PREADWRITE
  X_LOCK (preadwritelock);
#endif
}

/* parent side after fork: release the locks in reverse acquisition order */
static void etp_atfork_parent (void)
{
#if !HAVE_PREADWRITE
  X_UNLOCK (preadwritelock);
#endif
  X_UNLOCK (reslock);
  X_UNLOCK (reqlock);
  X_UNLOCK (wrklock);
}

/* child side after fork: the worker threads are gone, so destroy all
 * queued and in-flight requests, free the worker records, reset every
 * counter, and re-create the synchronisation objects */
static void etp_atfork_child (void)
{
  ETP_REQ *prv;

  while ((prv = reqq_shift (&req_queue)))
    ETP_DESTROY (prv);

  while ((prv = reqq_shift (&res_queue)))
    ETP_DESTROY (prv);

  while (wrk_first.next != &wrk_first)
    {
      etp_worker *wrk = wrk_first.next;

      /* a request a worker was executing when the fork happened */
      if (wrk->req)
        ETP_DESTROY (wrk->req);

      etp_worker_clear (wrk);
      etp_worker_free (wrk);
    }

  started  = 0;
  idle     = 0;
  nreqs    = 0;
  nready   = 0;
  npending = 0;

  etp_thread_init ();
}
|
431
|
+
|
432
|
+
/* one-time process initialisation: create the locks and register the
 * fork handlers (runs via pthread_once from etp_init) */
static void
etp_once_init (void)
{
  etp_thread_init ();
  X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child);
}

/* initialise the pool; safe to call repeatedly - only the poll
 * notification callbacks are (re)set on later calls. always returns 0. */
static int
etp_init (void (*want_poll)(void), void (*done_poll)(void))
{
  static pthread_once_t doinit = PTHREAD_ONCE_INIT;

  pthread_once (&doinit, etp_once_init);

  want_poll_cb = want_poll;
  done_poll_cb = done_poll;

  return 0;
}
|
451
|
+
|
452
|
+
X_THREAD_PROC (etp_proc);
|
453
|
+
|
454
|
+
/* spawn one worker thread and, on success, link it into the worker list
 * and bump the started count (all under wrklock) */
static void etp_start_thread (void)
{
  etp_worker *wrk = calloc (1, sizeof (etp_worker));

  /*TODO*/
  /* NOTE(review): with NDEBUG this assert compiles away and a failed
   * calloc would be dereferenced by thread_create - confirm intent */
  assert (("unable to allocate worker thread data", wrk));

  X_LOCK (wrklock);

  if (thread_create (&wrk->tid, etp_proc, (void *)wrk))
    {
      /* insert right after the list head */
      wrk->prev = &wrk_first;
      wrk->next = wrk_first.next;
      wrk_first.next->prev = wrk;
      wrk_first.next = wrk;
      ++started;
    }
  else
    free (wrk);

  X_UNLOCK (wrklock);
}

/* start another worker if there is more queued work than the current
 * threads (plus already-finished results) can account for */
static void etp_maybe_start_thread (void)
{
  if (expect_true (etp_nthreads () >= wanted))
    return;

  /* todo: maybe use idle here, but might be less exact */
  if (expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
    return;

  etp_start_thread ();
}
|
488
|
+
|
489
|
+
static void etp_end_thread (void)
|
490
|
+
{
|
491
|
+
eio_req *req = calloc (1, sizeof (eio_req));
|
492
|
+
|
493
|
+
req->type = -1;
|
494
|
+
req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
|
495
|
+
|
496
|
+
X_LOCK (reqlock);
|
497
|
+
reqq_push (&req_queue, req);
|
498
|
+
X_COND_SIGNAL (reqwait);
|
499
|
+
X_UNLOCK (reqlock);
|
500
|
+
|
501
|
+
X_LOCK (wrklock);
|
502
|
+
--started;
|
503
|
+
X_UNLOCK (wrklock);
|
504
|
+
}
|
505
|
+
|
506
|
+
/* drain the result queue, running each finished request's callbacks.
 * returns 0 when the queue is empty, -1 with errno=EAGAIN when stopped
 * early by max_poll_time/max_poll_reqs, or a nonzero finish-callback
 * result (which aborts polling). */
static int etp_poll (void)
{
  unsigned int maxreqs;
  unsigned int maxtime;
  struct timeval tv_start, tv_now;

  /* snapshot the limits under reslock */
  X_LOCK (reslock);
  maxreqs = max_poll_reqs;
  maxtime = max_poll_time;
  X_UNLOCK (reslock);

  if (maxtime)
    gettimeofday (&tv_start, 0);

  for (;;)
    {
      ETP_REQ *req;

      etp_maybe_start_thread ();

      X_LOCK (reslock);
      req = reqq_shift (&res_queue);

      if (req)
        {
          --npending;

          /* queue drained: let the event loop stop watching us */
          if (!res_queue.size && done_poll_cb)
            done_poll_cb ();
        }

      X_UNLOCK (reslock);

      if (!req)
        return 0;

      X_LOCK (reqlock);
      --nreqs;
      X_UNLOCK (reqlock);

      /* a group with members still running is not finished yet */
      if (expect_false (req->type == EIO_GROUP && req->size))
        {
          req->int1 = 1; /* mark request as delayed */
          continue;
        }
      else
        {
          int res = ETP_FINISH (req);
          if (expect_false (res))
            return res;
        }

      if (expect_false (maxreqs && !--maxreqs))
        break;

      if (maxtime)
        {
          gettimeofday (&tv_now, 0);

          if (tvdiff (&tv_start, &tv_now) >= maxtime)
            break;
        }
    }

  errno = EAGAIN;
  return -1;
}
|
573
|
+
|
574
|
+
/* mark a request as cancelled (flag write guarded by wrklock) and
 * propagate the cancellation to any group members it might have */
static void etp_cancel (ETP_REQ *req)
{
  X_LOCK (wrklock);
  req->flags |= EIO_FLAG_CANCELLED;
  X_UNLOCK (wrklock);

  eio_grp_cancel (req);
}
|
582
|
+
|
583
|
+
/* queue a request for execution.  group requests are never run by a
 * worker: they go straight onto the result queue so eio_poll sees them;
 * everything else is pushed to the request queue and a sleeping worker
 * is woken (spawning a new one if needed). */
static void etp_submit (ETP_REQ *req)
{
  /* rebase the user-visible priority to 0-based and clamp it */
  req->pri -= ETP_PRI_MIN;

  if (expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
  if (expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;

  if (expect_false (req->type == EIO_GROUP))
    {
      /* I hope this is worth it :/ */
      X_LOCK (reqlock);
      ++nreqs;
      X_UNLOCK (reqlock);

      X_LOCK (reslock);

      ++npending;

      /* result queue just became non-empty: ask the event loop to poll */
      if (!reqq_push (&res_queue, req) && want_poll_cb)
        want_poll_cb ();

      X_UNLOCK (reslock);
    }
  else
    {
      X_LOCK (reqlock);
      ++nreqs;
      ++nready;
      reqq_push (&req_queue, req);
      X_COND_SIGNAL (reqwait);
      X_UNLOCK (reqlock);

      etp_maybe_start_thread ();
    }
}
|
618
|
+
|
619
|
+
/* limit the wall-clock time one etp_poll call may spend, in seconds
 * (stored as internal ticks); 0 means no limit */
static void etp_set_max_poll_time (double nseconds)
{
  if (WORDACCESS_UNSAFE) X_LOCK (reslock);
  max_poll_time = nseconds * EIO_TICKS;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
}

/* limit the number of requests handled per etp_poll call; 0 = unlimited */
static void etp_set_max_poll_reqs (unsigned int maxreqs)
{
  if (WORDACCESS_UNSAFE) X_LOCK (reslock);
  max_poll_reqs = maxreqs;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
}

/* number of worker threads allowed to idle indefinitely */
static void etp_set_max_idle (unsigned int nthreads)
{
  if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
  max_idle = nthreads;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
}

/* seconds after which surplus idle worker threads exit */
static void etp_set_idle_timeout (unsigned int seconds)
{
  if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
  idle_timeout = seconds;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
}

/* raise the desired thread count to at least nthreads */
static void etp_set_min_parallel (unsigned int nthreads)
{
  if (wanted < nthreads)
    wanted = nthreads;
}

/* lower the desired thread count, actively stopping surplus threads */
static void etp_set_max_parallel (unsigned int nthreads)
{
  if (wanted > nthreads)
    wanted = nthreads;

  while (started > wanted)
    etp_end_thread ();
}
|
661
|
+
|
662
|
+
/*****************************************************************************/
|
663
|
+
|
664
|
+
/* run the group's feeder callback until the group again holds int2
 * ("feed limit") requests, the group is cancelled, or the feeder stops
 * adding requests (detected via the GROUPADD flag protocol) */
static void grp_try_feed (eio_req *grp)
{
  while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
    {
      /* the feeder sets GROUPADD when it adds a request */
      grp->flags &= ~EIO_FLAG_GROUPADD;

      EIO_FEED (grp);

      /* stop if no progress has been made */
      if (!(grp->flags & EIO_FLAG_GROUPADD))
        {
          grp->feed = 0;
          break;
        }
    }
}

/* one group member finished: refill via the feeder and, if the group is
 * now empty and was already marked delayed (int1 set by etp_poll),
 * finish the group itself */
static int grp_dec (eio_req *grp)
{
  --grp->size;

  /* call feeder, if applicable */
  grp_try_feed (grp);

  /* finish, if done */
  if (!grp->size && grp->int1)
    return eio_finish (grp);
  else
    return 0;
}
|
694
|
+
|
695
|
+
/* free the pointer members the request owns (as indicated by the
 * PTR1/PTR2_FREE flags), then invoke the user destroy callback */
void eio_destroy (eio_req *req)
{
  if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1);
  if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2);

  EIO_DESTROY (req);
}

/* run a request's finish callback, unlink it from its group (possibly
 * finishing the group too), then destroy the request.  returns the
 * first nonzero callback result, which makes eio_poll stop. */
static int eio_finish (eio_req *req)
{
  int res = EIO_FINISH (req);

  if (req->grp)
    {
      int res2;
      eio_req *grp = req->grp;

      /* unlink request */
      if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
      if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;

      if (grp->grp_first == req)
        grp->grp_first = req->grp_next;

      res2 = grp_dec (grp);

      if (!res && res2)
        res = res2;
    }

  eio_destroy (req);

  return res;
}
|
729
|
+
|
730
|
+
/* cancel every request that is a member of the group (the grp parameter
 * is reused as the iteration cursor) */
void eio_grp_cancel (eio_req *grp)
{
  for (grp = grp->grp_first; grp; grp = grp->grp_next)
    eio_cancel (grp);
}

/* public API: thin wrappers over the generic etp_* thread-pool layer */

void eio_cancel (eio_req *req)
{
  etp_cancel (req);
}

void eio_submit (eio_req *req)
{
  etp_submit (req);
}

unsigned int eio_nreqs (void)
{
  return etp_nreqs ();
}

unsigned int eio_nready (void)
{
  return etp_nready ();
}

unsigned int eio_npending (void)
{
  return etp_npending ();
}

unsigned int eio_nthreads (void)
{
  return etp_nthreads ();
}

void eio_set_max_poll_time (double nseconds)
{
  etp_set_max_poll_time (nseconds);
}

void eio_set_max_poll_reqs (unsigned int maxreqs)
{
  etp_set_max_poll_reqs (maxreqs);
}

void eio_set_max_idle (unsigned int nthreads)
{
  etp_set_max_idle (nthreads);
}

void eio_set_idle_timeout (unsigned int seconds)
{
  etp_set_idle_timeout (seconds);
}

void eio_set_min_parallel (unsigned int nthreads)
{
  etp_set_min_parallel (nthreads);
}

void eio_set_max_parallel (unsigned int nthreads)
{
  etp_set_max_parallel (nthreads);
}

int eio_poll (void)
{
  return etp_poll ();
}
|
800
|
+
|
801
|
+
/*****************************************************************************/
|
802
|
+
/* work around various missing functions */
|
803
|
+
|
804
|
+
#if !HAVE_PREADWRITE
|
805
|
+
# undef pread
|
806
|
+
# undef pwrite
|
807
|
+
# define pread eio__pread
|
808
|
+
# define pwrite eio__pwrite
|
809
|
+
|
810
|
+
/* pread emulation via lseek+read+lseek.  preadwritelock only protects
 * the emulated calls against each other, not against concurrent plain
 * read/write on the same fd (documented limitation). */
static ssize_t
eio__pread (int fd, void *buf, size_t count, off_t offset)
{
  ssize_t res;
  off_t ooffset;

  X_LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR); /* remember the current position... */
  lseek (fd, offset, SEEK_SET);
  res = read (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);     /* ...and restore it afterwards */
  X_UNLOCK (preadwritelock);

  return res;
}

/* pwrite emulation, symmetric to the pread emulation in this block */
static ssize_t
eio__pwrite (int fd, void *buf, size_t count, off_t offset)
{
  ssize_t res;
  off_t ooffset;

  X_LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR);
  lseek (fd, offset, SEEK_SET);
  res = write (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);
  X_UNLOCK (preadwritelock);

  return res;
}
|
841
|
+
#endif
|
842
|
+
|
843
|
+
#ifndef HAVE_UTIMES
|
844
|
+
|
845
|
+
# undef utimes
|
846
|
+
# define utimes(path,times) eio__utimes (path, times)
|
847
|
+
|
848
|
+
/* emulate utimes(2) with utime(2): only whole-second resolution, the
 * sub-second parts of the supplied timevals are dropped.  a NULL times
 * pointer means "set to current time", same as utimes. */
static int
eio__utimes (const char *filename, const struct timeval times[2])
{
  struct utimbuf buf;

  if (!times)
    return utime (filename, 0);

  buf.actime  = times[0].tv_sec;
  buf.modtime = times[1].tv_sec;

  return utime (filename, &buf);
}
|
863
|
+
|
864
|
+
#endif
|
865
|
+
|
866
|
+
#ifndef HAVE_FUTIMES
|
867
|
+
|
868
|
+
# undef futimes
|
869
|
+
# define futimes(fd,times) eio__futimes (fd, times)
|
870
|
+
|
871
|
+
/* fallback when futimes(2) is unavailable: there is no portable way to
 * set timestamps through a file descriptor, so always fail with ENOSYS */
static int eio__futimes (int fd, const struct timeval tv[2])
{
  errno = ENOSYS;

  return -1;
}
|
876
|
+
|
877
|
+
#endif
|
878
|
+
|
879
|
+
#if !HAVE_FDATASYNC
|
880
|
+
# undef fdatasync
|
881
|
+
# define fdatasync(fd) fsync (fd)
|
882
|
+
#endif
|
883
|
+
|
884
|
+
/* sync_file_range always needs emulation */
|
885
|
+
/* flush a byte range of fd to disk: use sync_file_range(2) where
 * available (translating eio's flag values to the kernel's when they
 * differ), otherwise fall back to fdatasync on the whole file */
int
eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
{
#if HAVE_SYNC_FILE_RANGE
  int res;

  if (EIO_SYNC_FILE_RANGE_WAIT_BEFORE != SYNC_FILE_RANGE_WAIT_BEFORE
      || EIO_SYNC_FILE_RANGE_WRITE != SYNC_FILE_RANGE_WRITE
      || EIO_SYNC_FILE_RANGE_WAIT_AFTER != SYNC_FILE_RANGE_WAIT_AFTER)
    {
      /* remap eio flag bits onto the kernel's values */
      flags = 0
         | (flags & EIO_SYNC_FILE_RANGE_WAIT_BEFORE ? SYNC_FILE_RANGE_WAIT_BEFORE : 0)
         | (flags & EIO_SYNC_FILE_RANGE_WRITE       ? SYNC_FILE_RANGE_WRITE       : 0)
         | (flags & EIO_SYNC_FILE_RANGE_WAIT_AFTER  ? SYNC_FILE_RANGE_WAIT_AFTER  : 0);
    }

  res = sync_file_range (fd, offset, nbytes, flags);

  if (!res || errno != ENOSYS)
    return res;
#endif

  /* even though we could play tricks with the flags, it's better to always
   * call fdatasync, as that matches the expectation of its users best */
  return fdatasync (fd);
}
|
911
|
+
|
912
|
+
#if !HAVE_READAHEAD
|
913
|
+
# undef readahead
|
914
|
+
# define readahead(fd,offset,count) eio__readahead (fd, offset, count, self)
|
915
|
+
|
916
|
+
/* readahead emulation: read the requested range into a scratch buffer
 * in EIO_BUFSIZE chunks purely to pull it into the page cache; read
 * errors are deliberately ignored and success (count) is always
 * reported, matching readahead's advisory nature */
static ssize_t
eio__readahead (int fd, off_t offset, size_t count, etp_worker *self)
{
  size_t todo = count;
  dBUF; /* declares/allocates eio_buf, returns -1/ENOMEM on failure */

  while (todo > 0)
    {
      size_t len = todo < EIO_BUFSIZE ? todo : EIO_BUFSIZE;

      pread (fd, eio_buf, len, offset);
      offset += len;
      todo   -= len;
    }

  errno = 0;
  return count;
}
|
934
|
+
|
935
|
+
#endif
|
936
|
+
|
937
|
+
/* sendfile always needs emulation */
|
938
|
+
/* copy count bytes from ifd (starting at offset) to ofd using the
 * platform's sendfile-style primitive, falling back to a pread/write
 * copy loop when sendfile is unavailable or rejects these descriptors.
 * returns bytes transferred or -1 with errno set. */
static ssize_t
eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self)
{
  ssize_t res;

  if (!count)
    return 0;

#if HAVE_SENDFILE
# if __linux
  res = sendfile (ofd, ifd, &offset, count);

# elif __FreeBSD__
  /*
   * Of course, the freebsd sendfile is a dire hack with no thoughts
   * wasted on making it similar to other I/O functions.
   */
  {
    off_t sbytes;
    res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);

#if 0 /* according to the manpage, this is correct, but broken behaviour */
    /* freebsd' sendfile will return 0 on success */
    /* freebsd 8 documents it as only setting *sbytes on EINTR and EAGAIN, but */
    /* not on e.g. EIO or EPIPE - sounds broken */
    if ((res < 0 && (errno == EAGAIN || errno == EINTR) && sbytes) || res == 0)
      res = sbytes;
#endif

    /* according to source inspection, this is correct, and useful behaviour */
    if (sbytes)
      res = sbytes;
  }

# elif defined (__APPLE__)

  {
    off_t sbytes = count;
    res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);

    /* according to the manpage, sbytes is always valid */
    if (sbytes)
      res = sbytes;
  }

# elif __hpux
  res = sendfile (ofd, ifd, offset, count, 0, 0);

# elif __solaris
  {
    struct sendfilevec vec;
    size_t sbytes;

    vec.sfv_fd   = ifd;
    vec.sfv_flag = 0;
    vec.sfv_off  = offset;
    vec.sfv_len  = count;

    res = sendfilev (ofd, &vec, 1, &sbytes);

    if (res < 0 && sbytes)
      res = sbytes;
  }

# endif

#elif defined (_WIN32)

  /* does not work, just for documentation of what would need to be done */
  {
    HANDLE h = TO_SOCKET (ifd);
    SetFilePointer (h, offset, 0, FILE_BEGIN);
    res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);
  }

#else
  res = -1;
  errno = ENOSYS;
#endif

  /* these errno values mean "sendfile cannot handle these descriptors";
   * switch to the generic copy loop rather than reporting failure */
  if (res <  0
      && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
          /* BSDs */
#ifdef ENOTSUP /* sigh, if the steenking pile called openbsd would only try to at least compile posix code... */
          || errno == ENOTSUP
#endif
          || errno == EOPNOTSUPP /* BSDs */
#if __solaris
          || errno == EAFNOSUPPORT || errno == EPROTOTYPE
#endif
         )
      )
    {
      /* emulate sendfile. this is a major pain in the ass */
      dBUF;

      res = 0;

      while (count)
        {
          ssize_t cnt;

          cnt = pread (ifd, eio_buf, count > EIO_BUFSIZE ? EIO_BUFSIZE : count, offset);

          if (cnt <= 0)
            {
              /* report an error only if nothing was copied so far */
              if (cnt && !res) res = -1;
              break;
            }

          cnt = write (ofd, eio_buf, cnt);

          if (cnt <= 0)
            {
              if (cnt && !res) res = -1;
              break;
            }

          offset += cnt;
          res    += cnt;
          count  -= cnt;
        }
    }

  return res;
}
|
1064
|
+
|
1065
|
+
static signed char
|
1066
|
+
eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
|
1067
|
+
{
|
1068
|
+
return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */
|
1069
|
+
: a->inode < b->inode ? -1
|
1070
|
+
: a->inode > b->inode ? 1
|
1071
|
+
: 0;
|
1072
|
+
}
|
1073
|
+
|
1074
|
+
/* convenience wrapper: EIO_DENT_CMP (x, <, y) reads as "x < y" under
 * eio_dent_cmp ordering */
#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0

/* tuning knobs for the hybrid radix/insertion sort below */
#define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */
#define EIO_SORT_FAST   60 /* when to only use insertion sort */
|
1078
|
+
|
1079
|
+
/* MSD radix-sort pre-pass over the dents array, keyed on (score, inode).
 * Only runs for arrays larger than EIO_SORT_FAST and deliberately stops
 * refining once partitions shrink to EIO_SORT_CUTOFF elements - the
 * insertion sort that follows finishes the job.  score_bits / inode_bits
 * are the OR of all scores / inodes and let us skip key bits that are
 * zero in every element. */
static void
eio_dent_radix_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
{
  /* list of byte*8+bit positions (within an eio_dirent) to partition on,
   * most significant first; slot 0 stays 0 and acts as the stop sentinel */
  unsigned char bits [9 + sizeof (ino_t) * 8];
  unsigned char *bit = bits;

  assert (CHAR_BIT == 8);
  assert (sizeof (eio_dirent) * 8 < 256);
  assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */
  assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */

  if (size <= EIO_SORT_FAST)
    return;

  /* first prepare an array of bits to test in our radix sort */
  /* try to take endianness into account, as well as differences in ino_t sizes */
  /* inode_bits must contain all inodes ORed together */
  /* which is used to skip bits that are 0 everywhere, which is very common */
  {
    ino_t endianness;
    int i, j;

    /* we store the byte offset of byte n into byte n of "endianness" */
    for (i = 0; i < sizeof (ino_t); ++i)
      ((unsigned char *)&endianness)[i] = i;

    *bit++ = 0; /* sentinel: *--bit == 0 terminates the descent */

    for (i = 0; i < sizeof (ino_t); ++i)
      {
        /* shifting off the byte offsets out of "endianness" */
        int offs = (offsetof (eio_dirent, inode) + (endianness & 0xff)) * 8;
        endianness >>= 8;

        /* only include inode bits that are actually set somewhere */
        for (j = 0; j < 8; ++j)
          if (inode_bits & (((ino_t)1) << (i * 8 + j)))
            *bit++ = offs + j;
      }

    /* score bits are pushed last, so they are partitioned on first */
    for (j = 0; j < 8; ++j)
      if (score_bits & (1 << j))
        *bit++ = offsetof (eio_dirent, score) * 8 + j;
  }

  /* now actually do the sorting (a variant of MSD radix sort) */
  {
    /* explicit stack of (subarray, next-bit) work items instead of recursion */
    eio_dirent    *base_stk [9 + sizeof (ino_t) * 8], *base;
    eio_dirent    *end_stk  [9 + sizeof (ino_t) * 8], *end;
    unsigned char *bit_stk  [9 + sizeof (ino_t) * 8];
    int stk_idx = 0;

    base_stk [stk_idx] = dents;
    end_stk  [stk_idx] = dents + size;
    bit_stk  [stk_idx] = bit - 1; /* start with the last (most significant) bit pushed */

    do
      {
        base = base_stk [stk_idx];
        end  = end_stk  [stk_idx];
        bit  = bit_stk  [stk_idx];

        for (;;)
          {
            /* byte offset and mask for the current key bit within a dirent */
            unsigned char O = *bit >> 3;
            unsigned char M = 1 << (*bit & 7);

            eio_dirent *a = base;
            eio_dirent *b = end;

            /* leave small partitions to the final insertion sort */
            if (b - a < EIO_SORT_CUTOFF)
              break;

            /* now bit-partition the array on the bit */
            /* this ugly asymmetric loop seems to perform much better than typical */
            /* partition algos found in the literature */
            do
              if (!(((unsigned char *)a)[O] & M))
                ++a;
              else if (!(((unsigned char *)--b)[O] & M))
                {
                  eio_dirent tmp = *a; *a = *b; *b = tmp;
                  ++a;
                }
            while (b > a);

            /* next bit, or stop, if no bits left in this path */
            if (!*--bit)
              break;

            /* defer the upper half [a, end) and keep working on [base, a) */
            base_stk [stk_idx] = a;
            end_stk  [stk_idx] = end;
            bit_stk  [stk_idx] = bit;
            ++stk_idx;

            end = a;
          }
      }
    while (stk_idx--);
  }
}
|
1179
|
+
|
1180
|
+
/* insertion sort finishing pass; requires size >= 1 (guarded by the
 * caller, eio_dent_sort).  Uses the classic sentinel trick: moving the
 * minimum to slot 0 first lets the inner shift loop omit a bounds check. */
static void
eio_dent_insertion_sort (eio_dirent *dents, int size)
{
  /* first move the smallest element to the front, to act as a sentinel */
  {
    int i;
    eio_dirent *min = dents;

    /* the radix pre-pass ensures that the minimum element is in the first EIO_SORT_CUTOFF + 1 elements */
    for (i = size > EIO_SORT_FAST ? EIO_SORT_CUTOFF + 1 : size; --i; )
      if (EIO_DENT_CMP (dents [i], <, *min))
        min = &dents [i];

    /* swap elements 0 and j (minimum) */
    {
      eio_dirent tmp = *dents; *dents = *min; *min = tmp;
    }
  }

  /* then do standard insertion sort, assuming that all elements are >= dents [0] */
  {
    eio_dirent *i, *j;

    for (i = dents + 1; i < dents + size; ++i)
      {
        eio_dirent value = *i;

        /* shift greater elements up; the sentinel at dents[0] stops the loop */
        for (j = i - 1; EIO_DENT_CMP (*j, >, value); --j)
          j [1] = j [0];

        j [1] = value;
      }
  }
}
|
1214
|
+
|
1215
|
+
static void
|
1216
|
+
eio_dent_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
|
1217
|
+
{
|
1218
|
+
if (size <= 1)
|
1219
|
+
return; /* our insertion sort relies on size > 0 */
|
1220
|
+
|
1221
|
+
/* first we use a radix sort, but only for dirs >= EIO_SORT_FAST */
|
1222
|
+
/* and stop sorting when the partitions are <= EIO_SORT_CUTOFF */
|
1223
|
+
eio_dent_radix_sort (dents, size, score_bits, inode_bits);
|
1224
|
+
|
1225
|
+
/* use an insertion sort at the end, or for small arrays, */
|
1226
|
+
/* as insertion sort is more efficient for small partitions */
|
1227
|
+
eio_dent_insertion_sort (dents, size);
|
1228
|
+
}
|
1229
|
+
|
1230
|
+
/* read a full directory */
/* Reads every entry of the directory named by req->ptr1 into two
 * parallel buffers: req->ptr2 gets the NUL-separated names, req->ptr1
 * (when EIO_READDIR_DENTS is requested) gets an eio_dirent array.
 * On success req->result is the number of entries; on failure it stays
 * -1 with errno set.  The buffers are reallocated under wrklock because
 * the cancellation path may free them concurrently. */
static void
eio__scandir (eio_req *req, etp_worker *self)
{
  DIR *dirp;
  EIO_STRUCT_DIRENT *entp;
  char *name, *names;
  int namesalloc = 4096;      /* current capacity of the names buffer */
  int namesoffs  = 0;         /* bytes of names stored so far */
  int flags = req->int1;
  eio_dirent *dents = 0;
  int dentalloc = 128;        /* current capacity of the dirent array */
  int dentoffs  = 0;          /* dirents stored so far */
  ino_t inode_bits = 0;       /* OR of all inodes, for the radix sort */

  req->result = -1;

  /* ordering flags are meaningless without dirent structures */
  if (!(flags & EIO_READDIR_DENTS))
    flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER);

  X_LOCK (wrklock);
  /* the corresponding closedir is in ETP_WORKER_CLEAR */
  self->dirp = dirp = opendir (req->ptr1);

  req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE;
  req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0;
  req->ptr2 = names = malloc (namesalloc);
  X_UNLOCK (wrklock);

  if (dirp && names && (!flags || dents))
    for (;;)
      {
        errno = 0;
        entp = readdir (dirp);

        if (!entp)
          {
            /* NULL with errno set is a real readdir error; otherwise EOF */
            if (errno)
              break;

            /* sort etc. */
            req->int1   = flags;   /* pass back flags (may have FOUND_UNKNOWN added) */
            req->result = dentoffs;

            if (flags & EIO_READDIR_STAT_ORDER)
              eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits);
            else if (flags & EIO_READDIR_DIRS_FIRST)
              if (flags & EIO_READDIR_FOUND_UNKNOWN)
                eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */
              else
                {
                  /* in this case, all is known, and we just put dirs first and sort them */
                  eio_dirent *oth = dents + dentoffs;
                  eio_dirent *dir = dents;

                  /* now partition dirs to the front, and non-dirs to the back */
                  /* by walking from both sides and swapping if necessary */
                  while (oth > dir)
                    {
                      if (dir->type == EIO_DT_DIR)
                        ++dir;
                      else if ((--oth)->type == EIO_DT_DIR)
                        {
                          eio_dirent tmp = *dir; *dir = *oth; *oth = tmp;

                          ++dir;
                        }
                    }

                  /* now sort the dirs only (dirs all have the same score) */
                  eio_dent_sort (dents, dir - dents, 0, inode_bits);
                }

            break;
          }

        /* now add the entry to our list(s) */
        name = entp->d_name;

        /* skip . and .. entries */
        if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
          {
            int len = D_NAMLEN (entp) + 1; /* including the terminating NUL */

            /* grow the names buffer geometrically until the entry fits */
            while (expect_false (namesoffs + len > namesalloc))
              {
                namesalloc *= 2;
                X_LOCK (wrklock);
                req->ptr2 = names = realloc (names, namesalloc);
                X_UNLOCK (wrklock);

                if (!names)
                  break;
              }

            memcpy (names + namesoffs, name, len);

            if (dents)
              {
                struct eio_dirent *ent;

                if (expect_false (dentoffs == dentalloc))
                  {
                    dentalloc *= 2;
                    X_LOCK (wrklock);
                    req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent));
                    X_UNLOCK (wrklock);

                    if (!dents)
                      break;
                  }

                ent = dents + dentoffs;

                ent->nameofs = namesoffs; /* rather dirtily we store the offset in the pointer */
                ent->namelen = len - 1;
                ent->inode   = D_INO (entp);

                inode_bits |= ent->inode;

                /* translate the platform d_type (when present) to EIO_DT_* */
                switch (D_TYPE (entp))
                  {
                    default:
                      ent->type = EIO_DT_UNKNOWN;
                      flags |= EIO_READDIR_FOUND_UNKNOWN;
                      break;

#ifdef DT_FIFO
                    case DT_FIFO: ent->type = EIO_DT_FIFO; break;
#endif
#ifdef DT_CHR
                    case DT_CHR:  ent->type = EIO_DT_CHR;  break;
#endif
#ifdef DT_MPC
                    case DT_MPC:  ent->type = EIO_DT_MPC;  break;
#endif
#ifdef DT_DIR
                    case DT_DIR:  ent->type = EIO_DT_DIR;  break;
#endif
#ifdef DT_NAM
                    case DT_NAM:  ent->type = EIO_DT_NAM;  break;
#endif
#ifdef DT_BLK
                    case DT_BLK:  ent->type = EIO_DT_BLK;  break;
#endif
#ifdef DT_MPB
                    case DT_MPB:  ent->type = EIO_DT_MPB;  break;
#endif
#ifdef DT_REG
                    case DT_REG:  ent->type = EIO_DT_REG;  break;
#endif
#ifdef DT_NWK
                    case DT_NWK:  ent->type = EIO_DT_NWK;  break;
#endif
#ifdef DT_CMP
                    case DT_CMP:  ent->type = EIO_DT_CMP;  break;
#endif
#ifdef DT_LNK
                    case DT_LNK:  ent->type = EIO_DT_LNK;  break;
#endif
#ifdef DT_SOCK
                    case DT_SOCK: ent->type = EIO_DT_SOCK; break;
#endif
#ifdef DT_DOOR
                    case DT_DOOR: ent->type = EIO_DT_DOOR; break;
#endif
#ifdef DT_WHT
                    case DT_WHT:  ent->type = EIO_DT_WHT;  break;
#endif
                  }

                /* default score: sorts after all directory heuristics below */
                ent->score = 7;

                if (flags & EIO_READDIR_DIRS_FIRST)
                  {
                    if (ent->type == EIO_DT_UNKNOWN)
                      {
                        if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
                          ent->score = 1;
                        else if (!strchr (name, '.')) /* absense of dots indicate likely dirs */
                          ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
                      }
                    else if (ent->type == EIO_DT_DIR)
                      ent->score = 0; /* known dirs sort first */
                  }
              }

            namesoffs += len;
            ++dentoffs;
          }

        /* honour cancellation between entries */
        if (EIO_CANCELLED (req))
          {
            errno = ECANCELED;
            break;
          }
      }
}
|
1428
|
+
|
1429
|
+
#ifdef PAGESIZE
# define eio_pagesize() PAGESIZE
#else
/* determine the system page size once via sysconf and cache it for all
 * subsequent calls */
static intptr_t
eio_pagesize (void)
{
  static intptr_t cached_page_size;

  if (!cached_page_size)
    cached_page_size = sysconf (_SC_PAGESIZE);

  return cached_page_size;
}
#endif
|
1443
|
+
|
1444
|
+
/* Expand the region (*addr, *length) in place so that it starts and
 * ends on page boundaries, as required by mlock/msync and friends. */
static void
eio_page_align (void **addr, size_t *length)
{
  intptr_t page_mask = eio_pagesize () - 1;
  intptr_t start     = (intptr_t)*addr;
  intptr_t slack     = start & page_mask;

  /* move the start down to its page boundary, growing the length by the
   * same amount so the original bytes stay covered */
  *addr    = (void *)(start - slack);
  *length += slack;

  /* finally round the length up to a whole number of pages */
  *length = (*length + page_mask) & ~page_mask;
}
|
1458
|
+
|
1459
|
+
#if !_POSIX_MEMLOCK
# define eio__mlockall(a) ((errno = ENOSYS), -1)
#else

/* mlockall wrapper: translates the portable EIO_MCL_* flag values to the
 * system MCL_* values when they differ.  Returns mlockall's result. */
static int
eio__mlockall (int flags)
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
  /* work around old-glibc malloc behaviour that interacts badly with
   * mlockall; see the referenced debian bug */
  extern int mallopt (int, int);
  mallopt (-6, 238); /* http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
#endif

  /* remap flags only when the EIO_MCL_* constants differ from the
   * system's MCL_* values */
  if (EIO_MCL_CURRENT != MCL_CURRENT
      || EIO_MCL_FUTURE != MCL_FUTURE)
    {
      flags = 0
         | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
         | (flags & EIO_MCL_FUTURE  ? MCL_FUTURE  : 0);
    }

  return mlockall (flags);
}
#endif
|
1482
|
+
|
1483
|
+
#if !_POSIX_MEMLOCK_RANGE
# define eio__mlock(a,b) ((errno = ENOSYS), -1)
#else

/* mlock wrapper: page-aligns the region first, as mlock requires a
 * page-aligned address on many systems.  Returns mlock's result. */
static int
eio__mlock (void *addr, size_t length)
{
  eio_page_align (&addr, &length);

  return mlock (addr, length);
}

#endif
|
1496
|
+
|
1497
|
+
#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
|
1498
|
+
# define eio__msync(a,b,c) ((errno = ENOSYS), -1)
|
1499
|
+
#else
|
1500
|
+
|
1501
|
+
int
|
1502
|
+
eio__msync (void *mem, size_t len, int flags)
|
1503
|
+
{
|
1504
|
+
eio_page_align (&mem, &len);
|
1505
|
+
|
1506
|
+
if (EIO_MS_ASYNC != MS_SYNC
|
1507
|
+
|| EIO_MS_INVALIDATE != MS_INVALIDATE
|
1508
|
+
|| EIO_MS_SYNC != MS_SYNC)
|
1509
|
+
{
|
1510
|
+
flags = 0
|
1511
|
+
| (flags & EIO_MS_ASYNC ? MS_ASYNC : 0)
|
1512
|
+
| (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
|
1513
|
+
| (flags & EIO_MS_SYNC ? MS_SYNC : 0);
|
1514
|
+
}
|
1515
|
+
|
1516
|
+
return msync (mem, len, flags);
|
1517
|
+
}
|
1518
|
+
|
1519
|
+
#endif
|
1520
|
+
|
1521
|
+
int
|
1522
|
+
eio__mtouch (void *mem, size_t len, int flags)
|
1523
|
+
{
|
1524
|
+
eio_page_align (&mem, &len);
|
1525
|
+
|
1526
|
+
{
|
1527
|
+
intptr_t addr = (intptr_t)mem;
|
1528
|
+
intptr_t end = addr + len;
|
1529
|
+
intptr_t page = eio_pagesize ();
|
1530
|
+
|
1531
|
+
if (addr < end)
|
1532
|
+
if (flags & EIO_MT_MODIFY) /* modify */
|
1533
|
+
do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len);
|
1534
|
+
else
|
1535
|
+
do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len);
|
1536
|
+
}
|
1537
|
+
|
1538
|
+
return 0;
|
1539
|
+
}
|
1540
|
+
|
1541
|
+
/*****************************************************************************/
|
1542
|
+
|
1543
|
+
/* Lazily allocate req->ptr2 (the result buffer) inside eio_execute's
 * switch.  On allocation failure it sets req->result = -1 with ENOMEM
 * and executes a bare "break" - so this macro is only usable directly
 * inside that switch statement.  The flag update is done under wrklock;
 * NOTE(review): the malloc and ptr2 assignment happen outside the lock -
 * presumably safe because only this worker touches ptr2 here; verify
 * against the cancellation path. */
#define ALLOC(len)				\
  if (!req->ptr2)				\
    {						\
      X_LOCK (wrklock);				\
      req->flags |= EIO_FLAG_PTR2_FREE;		\
      X_UNLOCK (wrklock);			\
      req->ptr2 = malloc (len);			\
      if (!req->ptr2)				\
        {					\
          errno = ENOMEM;			\
          req->result = -1;			\
          break;				\
        }					\
    }
|
1557
|
+
|
1558
|
+
/* Worker-thread main loop: repeatedly pops a request from req_queue,
 * executes it, and pushes the result onto res_queue (notifying the
 * poll callback when the result queue transitions from empty).
 * Exits when it receives a request with a negative type, or after
 * idling past idle_timeout while more than max_idle workers are idle. */
X_THREAD_PROC (etp_proc)
{
  ETP_REQ *req;
  struct timespec ts;
  etp_worker *self = (etp_worker *)thr_arg;

  /* try to distribute timeouts somewhat randomly */
  ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);

  for (;;)
    {
      X_LOCK (reqlock);

      /* wait (under reqlock) until a request is available */
      for (;;)
        {
          self->req = req = reqq_shift (&req_queue);

          if (req)
            break;

          ++idle;

          ts.tv_sec = time (0) + idle_timeout;
          if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
            {
              /* timed out: retire this worker if enough others are idle */
              if (idle > max_idle)
                {
                  --idle;
                  X_UNLOCK (reqlock);
                  X_LOCK (wrklock);
                  --started;
                  X_UNLOCK (wrklock);
                  goto quit;
                }

              /* we are allowed to idle, so do so without any timeout */
              X_COND_WAIT (reqwait, reqlock);
            }

          --idle;
        }

      --nready;

      X_UNLOCK (reqlock);

      /* negative type is the shutdown sentinel */
      if (req->type < 0)
        goto quit;

      if (!EIO_CANCELLED (req))
        ETP_EXECUTE (self, req);

      /* publish the finished request on the result queue */
      X_LOCK (reslock);

      ++npending;

      /* reqq_push returns the previous queue length; 0 means the queue
       * was empty, so wake the event loop via want_poll_cb */
      if (!reqq_push (&res_queue, req) && want_poll_cb)
        want_poll_cb ();

      self->req = 0;
      etp_worker_clear (self);

      X_UNLOCK (reslock);
    }

quit:
  X_LOCK (wrklock);
  etp_worker_free (self);
  X_UNLOCK (wrklock);

  return 0;
}
|
1630
|
+
|
1631
|
+
/*****************************************************************************/
|
1632
|
+
|
1633
|
+
/* Initialise the eio thread pool.  want_poll/done_poll are invoked when
 * the result queue becomes non-empty / is drained.  Returns etp_init's
 * result (0 on success). */
int eio_init (void (*want_poll)(void), void (*done_poll)(void))
{
  return etp_init (want_poll, done_poll);
}
|
1637
|
+
|
1638
|
+
/* Default destroy callback for requests allocated by the wrapper API
 * below: frees the request structure itself (ptr1/ptr2 are freed
 * separately according to the EIO_FLAG_PTR*_FREE flags). */
static void eio_api_destroy (eio_req *req)
{
  free (req);
}
|
1642
|
+
|
1643
|
+
/* REQ(rtype): allocate a zeroed eio_req named "req" of the given type,
 * returning 0 from the enclosing wrapper on allocation failure.  Expects
 * "pri", "cb" and "data" to be in scope (the wrapper's parameters). */
#define REQ(rtype)					\
  eio_req *req;						\
                                                        \
  req = (eio_req *)calloc (1, sizeof *req);		\
  if (!req)						\
    return 0;						\
                                                        \
  req->type    = rtype;					\
  req->pri     = pri;					\
  req->finish  = cb;					\
  req->data    = data;					\
  req->destroy = eio_api_destroy;

/* SEND: submit the request and return it to the caller */
#define SEND eio_submit (req); return req

/* PATH: duplicate the wrapper's "path" argument into req->ptr1, marking
 * it for freeing; on strdup failure the request is destroyed and the
 * wrapper returns 0 */
#define PATH							\
  req->flags |= EIO_FLAG_PTR1_FREE;				\
  req->ptr1 = strdup (path);					\
  if (!req->ptr1)						\
    {								\
      eio_api_destroy (req);					\
      return 0;							\
    }
|
1666
|
+
|
1667
|
+
/* Execute one request synchronously in a worker thread: dispatch on
 * req->type, store the syscall result in req->result and the final
 * errno in req->errorno.  ALLOC may "break" out of the switch with
 * result = -1/ENOMEM for requests that need a result buffer. */
static void eio_execute (etp_worker *self, eio_req *req)
{
  switch (req->type)
    {
      /* pread/pwrite when an offset is given, plain read/write otherwise */
      case EIO_READ:      ALLOC (req->size);
                          req->result = req->offs >= 0
                                      ? pread     (req->int1, req->ptr2, req->size, req->offs)
                                      : read      (req->int1, req->ptr2, req->size); break;
      case EIO_WRITE:     req->result = req->offs >= 0
                                      ? pwrite    (req->int1, req->ptr2, req->size, req->offs)
                                      : write     (req->int1, req->ptr2, req->size); break;

      case EIO_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
      case EIO_SENDFILE:  req->result = eio__sendfile (req->int1, req->int2, req->offs, req->size, self); break;

      /* stat family: result buffer allocated on demand into ptr2 */
      case EIO_STAT:      ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = stat      (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break;
      case EIO_LSTAT:     ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = lstat     (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break;
      case EIO_FSTAT:     ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = fstat     (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break;

      case EIO_STATVFS:   ALLOC (sizeof (EIO_STRUCT_STATVFS));
                          req->result = statvfs   (req->ptr1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
      case EIO_FSTATVFS:  ALLOC (sizeof (EIO_STRUCT_STATVFS));
                          req->result = fstatvfs  (req->int1, (EIO_STRUCT_STATVFS *)req->ptr2); break;

      case EIO_CHOWN:     req->result = chown     (req->ptr1, req->int2, req->int3); break;
      case EIO_FCHOWN:    req->result = fchown    (req->int1, req->int2, req->int3); break;
      case EIO_CHMOD:     req->result = chmod     (req->ptr1, (mode_t)req->int2); break;
      case EIO_FCHMOD:    req->result = fchmod    (req->int1, (mode_t)req->int2); break;
      case EIO_TRUNCATE:  req->result = truncate  (req->ptr1, req->offs); break;
      case EIO_FTRUNCATE: req->result = ftruncate (req->int1, req->offs); break;

      case EIO_OPEN:      req->result = open      (req->ptr1, req->int1, (mode_t)req->int2); break;
      case EIO_CLOSE:     req->result = close     (req->int1); break;
      case EIO_DUP2:      req->result = dup2      (req->int1, req->int2); break;
      case EIO_UNLINK:    req->result = unlink    (req->ptr1); break;
      case EIO_RMDIR:     req->result = rmdir     (req->ptr1); break;
      case EIO_MKDIR:     req->result = mkdir     (req->ptr1, (mode_t)req->int2); break;
      case EIO_RENAME:    req->result = rename    (req->ptr1, req->ptr2); break;
      case EIO_LINK:      req->result = link      (req->ptr1, req->ptr2); break;
      case EIO_SYMLINK:   req->result = symlink   (req->ptr1, req->ptr2); break;
      case EIO_MKNOD:     req->result = mknod     (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;

      case EIO_READLINK:  ALLOC (PATH_MAX);
                          req->result = readlink  (req->ptr1, req->ptr2, PATH_MAX); break;

      /* sync() has no return value, so report success unconditionally */
      case EIO_SYNC:      req->result = 0; sync (); break;
      case EIO_FSYNC:     req->result = fsync     (req->int1); break;
      case EIO_FDATASYNC: req->result = fdatasync (req->int1); break;
      case EIO_MSYNC:     req->result = eio__msync (req->ptr2, req->size, req->int1); break;
      case EIO_MTOUCH:    req->result = eio__mtouch (req->ptr2, req->size, req->int1); break;
      case EIO_MLOCK:     req->result = eio__mlock (req->ptr2, req->size); break;
      case EIO_MLOCKALL:  req->result = eio__mlockall (req->int1); break;
      case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break;

      case EIO_READDIR:   eio__scandir (req, self); break;

      /* sleep for req->nv1 (fractional) seconds */
      case EIO_BUSY:
#ifdef _WIN32
        Sleep (req->nv1 * 1e3);
#else
        {
          struct timeval tv;

          tv.tv_sec  = req->nv1;
          tv.tv_usec = (req->nv1 - tv.tv_sec) * 1e6;

          /* select with no fds is a portable sub-second sleep */
          req->result = select (0, 0, 0, 0, &tv);
        }
#endif
        break;

      /* set atime/mtime from the fractional-second doubles nv1/nv2;
       * (-1, -1) means "now" (NULL times) */
      case EIO_UTIME:
      case EIO_FUTIME:
        {
          struct timeval tv[2];
          struct timeval *times;

          if (req->nv1 != -1. || req->nv2 != -1.)
            {
              tv[0].tv_sec  = req->nv1;
              tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1000000.;
              tv[1].tv_sec  = req->nv2;
              tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1000000.;

              times = tv;
            }
          else
            times = 0;

          req->result = req->type == EIO_FUTIME
                        ? futimes (req->int1, times)
                        : utimes (req->ptr1, times);
        }
        break;

      case EIO_GROUP:
        abort (); /* handled in eio_request */

      case EIO_NOP:
        req->result = 0;
        break;

      /* user-supplied callback stored in req->feed */
      case EIO_CUSTOM:
        ((void (*)(eio_req *))req->feed) (req);
        break;

      default:
        errno = ENOSYS;
        req->result = -1;
        break;
    }

  /* preserve the errno of the operation for the finish callback */
  req->errorno = errno;
}
|
1784
|
+
|
1785
|
+
#ifndef EIO_NO_WRAPPERS
|
1786
|
+
|
1787
|
+
/* The wrappers below build an eio_req via REQ, fill in the operation's
 * fields, submit it via SEND and return it (0 on allocation failure). */

/* no-op request: completes immediately with result 0 */
eio_req *eio_nop (int pri, eio_cb cb, void *data)
{
  REQ (EIO_NOP); SEND;
}

/* occupy a worker thread for "delay" seconds (testing/benchmarking aid) */
eio_req *eio_busy (double delay, int pri, eio_cb cb, void *data)
{
  REQ (EIO_BUSY); req->nv1 = delay; SEND;
}

eio_req *eio_sync (int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNC); SEND;
}

eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSYNC); req->int1 = fd; SEND;
}

eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MSYNC); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}

eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}

eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
}

eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
}
|
1826
|
+
|
1827
|
+
/* fd-based request wrappers: each stashes its arguments in the generic
 * int1/int2/int3/offs/size/nv* slots that eio_execute decodes. */

eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
}

eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FDATASYNC); req->int1 = fd; SEND;
}

eio_req *eio_close (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CLOSE); req->int1 = fd; SEND;
}

eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READAHEAD); req->int1 = fd; req->offs = offset; req->size = length; SEND;
}

/* buf is caller-owned; a negative offset means "use the fd's position" */
eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READ); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}

eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_WRITE); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}

eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSTAT); req->int1 = fd; SEND;
}

eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
}

/* atime/mtime are fractional-second doubles; (-1, -1) means "now" */
eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
}

eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FTRUNCATE); req->int1 = fd; req->offs = offset; SEND;
}

eio_req *eio_fchmod (int fd, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FCHMOD); req->int1 = fd; req->int2 = (long)mode; SEND;
}

eio_req *eio_fchown (int fd, uid_t uid, gid_t gid, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FCHOWN); req->int1 = fd; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}

eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data)
{
  REQ (EIO_DUP2); req->int1 = fd; req->int2 = fd2; SEND;
}
|
1891
|
+
|
1892
|
+
/* sendfile and path-based wrappers; PATH strdups "path" into ptr1 and
 * destroys the request (returning 0) if the copy fails. */

eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SENDFILE); req->int1 = out_fd; req->int2 = in_fd; req->offs = in_offset; req->size = length; SEND;
}

eio_req *eio_open (const char *path, int flags, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_OPEN); PATH; req->int1 = flags; req->int2 = (long)mode; SEND;
}

/* atime/mtime are fractional-second doubles; (-1, -1) means "now" */
eio_req *eio_utime (const char *path, double atime, double mtime, int pri, eio_cb cb, void *data)
{
  REQ (EIO_UTIME); PATH; req->nv1 = atime; req->nv2 = mtime; SEND;
}

eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_TRUNCATE); PATH; req->offs = offset; SEND;
}

eio_req *eio_chown (const char *path, uid_t uid, gid_t gid, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CHOWN); PATH; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}

eio_req *eio_chmod (const char *path, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CHMOD); PATH; req->int2 = (long)mode; SEND;
}

eio_req *eio_mkdir (const char *path, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MKDIR); PATH; req->int2 = (long)mode; SEND;
}
|
1926
|
+
|
1927
|
+
/* internal helper: build and submit a request of TYPE whose only argument
 * is a single path (stat, unlink, rmdir, ...) */
static eio_req *
eio__1path (int type, const char *path, int pri, eio_cb cb, void *data)
{
  REQ (type); PATH; SEND;
}
|
1932
|
+
|
1933
|
+
/* enqueue an asynchronous readlink(2) on PATH */
eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_READLINK, path, pri, cb, data);
}
|
1937
|
+
|
1938
|
+
/* enqueue an asynchronous stat(2) on PATH */
eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_STAT, path, pri, cb, data);
}
|
1942
|
+
|
1943
|
+
/* enqueue an asynchronous lstat(2) on PATH (does not follow symlinks) */
eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_LSTAT, path, pri, cb, data);
}
|
1947
|
+
|
1948
|
+
/* enqueue an asynchronous statvfs(2) on PATH */
eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_STATVFS, path, pri, cb, data);
}
|
1952
|
+
|
1953
|
+
/* enqueue an asynchronous unlink(2) of PATH */
eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_UNLINK, path, pri, cb, data);
}
|
1957
|
+
|
1958
|
+
/* enqueue an asynchronous rmdir(2) of PATH */
eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_RMDIR, path, pri, cb, data);
}
|
1962
|
+
|
1963
|
+
/* enqueue an asynchronous directory read of PATH; FLAGS (EIO_READDIR_*
 * presumably — see eio.h) are carried in int1 */
eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
}
|
1967
|
+
|
1968
|
+
/* enqueue an asynchronous mknod(2); mode uses int2, while the dev_t is
 * smuggled through the off_t-typed offs slot (wide enough for a dev_t) */
eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
}
|
1972
|
+
|
1973
|
+
/* internal helper: build and submit a request of TYPE that takes two paths
 * (link, symlink, rename).  PATH stores the first path; NEW_PATH is
 * strdup'd into ptr2 and flagged with EIO_FLAG_PTR2_FREE so the library
 * frees it with the request.  On strdup failure the half-built request is
 * destroyed and 0 is returned.  The flag is deliberately set *before* the
 * strdup so eio_api_destroy's cleanup stays consistent either way
 * (free (NULL) is a no-op). */
static eio_req *
eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  REQ (type); PATH;

  req->flags |= EIO_FLAG_PTR2_FREE;
  req->ptr2 = strdup (new_path);
  if (!req->ptr2)
    {
      eio_api_destroy (req);
      return 0;
    }

  SEND;
}
|
1988
|
+
|
1989
|
+
/* enqueue an asynchronous link(2): create NEW_PATH as a hard link to PATH */
eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_LINK, path, new_path, pri, cb, data);
}
|
1993
|
+
|
1994
|
+
/* enqueue an asynchronous symlink(2): create NEW_PATH pointing to PATH */
eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_SYMLINK, path, new_path, pri, cb, data);
}
|
1998
|
+
|
1999
|
+
/* enqueue an asynchronous rename(2) of PATH to NEW_PATH */
eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
}
|
2003
|
+
|
2004
|
+
/* enqueue a user-defined request: EXECUTE is stored in the request's feed
 * slot (cast from eio_cb), presumably invoked in a worker thread when the
 * EIO_CUSTOM request is executed — confirm against the EIO_CUSTOM handler */
eio_req *eio_custom (eio_cb execute, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CUSTOM); req->feed = (void (*)(eio_req *))execute; SEND;
}
|
2008
|
+
|
2009
|
+
#endif
|
2010
|
+
|
2011
|
+
/* create a request group; groups are forced to maximum priority (the local
 * pri shadows the usual parameter — this function takes none) so they are
 * scheduled ahead of their members */
eio_req *eio_grp (eio_cb cb, void *data)
{
  const int pri = EIO_PRI_MAX;

  REQ (EIO_GROUP); SEND;
}
|
2017
|
+
|
2018
|
+
#undef REQ
|
2019
|
+
#undef PATH
|
2020
|
+
#undef SEND
|
2021
|
+
|
2022
|
+
/*****************************************************************************/
|
2023
|
+
/* grp functions */
|
2024
|
+
|
2025
|
+
/* install FEED as the group's feeder callback and set its limit (stored in
 * the group's int2 slot), then immediately try to feed the group */
void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit)
{
  grp->int2 = limit;
  grp->feed = feed;

  grp_try_feed (grp);
}
|
2032
|
+
|
2033
|
+
/* change the group's feed limit (int2 slot) and re-run the feeder, which
 * may add more requests now that the limit changed */
void eio_grp_limit (eio_req *grp, int limit)
{
  grp->int2 = limit;

  grp_try_feed (grp);
}
|
2039
|
+
|
2040
|
+
/* add REQ as a member of group GRP by pushing it onto the front of the
   group's doubly-linked member list and bumping the member count */
void eio_grp_add (eio_req *grp, eio_req *req)
{
  eio_req *head = grp->grp_first;

  /* int1 == 2 marks a finished group; adding to one is a caller bug */
  assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));

  grp->flags |= EIO_FLAG_GROUPADD;
  ++grp->size;

  /* link REQ in as the new list head */
  req->grp      = grp;
  req->grp_prev = 0;
  req->grp_next = head;

  if (head)
    head->grp_prev = req;

  grp->grp_first = req;
}
|
2057
|
+
|
2058
|
+
/*****************************************************************************/
|
2059
|
+
/* misc garbage */
|
2060
|
+
|
2061
|
+
ssize_t eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count)
|
2062
|
+
{
|
2063
|
+
etp_worker wrk;
|
2064
|
+
ssize_t ret;
|
2065
|
+
|
2066
|
+
wrk.dbuf = 0;
|
2067
|
+
|
2068
|
+
ret = eio__sendfile (ofd, ifd, offset, count, &wrk);
|
2069
|
+
|
2070
|
+
if (wrk.dbuf)
|
2071
|
+
free (wrk.dbuf);
|
2072
|
+
|
2073
|
+
return ret;
|
2074
|
+
}
|
2075
|
+
|