path: root/src/unix
author    Jameson Nash <vtjnash@gmail.com>        2018-02-13 16:05:45 -0500
committer Santiago Gimeno <santiago.gimeno@gmail.com>  2018-09-28 21:47:07 +0200
commit    60abdbaed6c20a41678cd9af1d6f40a1d11bf5e2 (patch)
tree      9077d8a98c049e83a07395b129202998f325f7c0 /src/unix
parent    19a341919546140c37834488e1f7c46721b0d2d5 (diff)
unix,readv: always permit partial reads to return
For simplicity and predictability (since the user must handle the retry
anyway), always emit exactly one readv/pread/preadv syscall and return
that result to the user. By contrast, write needs to preserve order, so
it keeps retrying the operation until it finishes before retiring the
req from the queue.

Fixes: https://github.com/nodejs/node/issues/16601
PR-URL: https://github.com/libuv/libuv/pull/1742
Refs: https://github.com/libuv/libuv/pull/640
Refs: https://github.com/libuv/libuv/issues/1720
Reviewed-By: Santiago Gimeno <santiago.gimeno@gmail.com>
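What this means for callers, as a minimal sketch (not part of this patch):
after this change a synchronous uv_fs_read() may return fewer bytes than
requested, so a caller that needs the whole range loops, advancing the
offset by each result. The read_full helper below is hypothetical.

#include <uv.h>

/* Hypothetical helper: read up to `len` bytes from `file` starting at
 * `offset`, retrying on partial reads. Returns total bytes read, or a
 * negative libuv error code. A NULL callback makes uv_fs_read() run
 * synchronously. */
static ssize_t read_full(uv_file file, char* data, size_t len, int64_t offset) {
  size_t total = 0;
  while (total < len) {
    uv_fs_t req;
    uv_buf_t buf = uv_buf_init(data + total, (unsigned int) (len - total));
    ssize_t r = uv_fs_read(uv_default_loop(), &req, file,
                           &buf, 1, offset + total, NULL);
    uv_fs_req_cleanup(&req);
    if (r < 0)
      return r;   /* I/O error (negative libuv error code) */
    if (r == 0)
      break;      /* EOF */
    total += r;   /* partial read: go around for the remainder */
  }
  return total;
}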
Diffstat (limited to 'src/unix')
-rw-r--r--  src/unix/fs.c | 35 +++++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/src/unix/fs.c b/src/unix/fs.c
index 8d773aa1..35bcc6f6 100644
--- a/src/unix/fs.c
+++ b/src/unix/fs.c
@@ -262,17 +262,25 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
static int no_preadv;
#endif
+ unsigned int iovmax;
ssize_t result;
#if defined(_AIX)
struct stat buf;
- if(fstat(req->file, &buf))
- return -1;
- if(S_ISDIR(buf.st_mode)) {
+ result = fstat(req->file, &buf);
+ if (result)
+ goto done;
+ if (S_ISDIR(buf.st_mode)) {
errno = EISDIR;
- return -1;
+ result = -1;
+ goto done;
}
#endif /* defined(_AIX) */
+
+ iovmax = uv__getiovmax();
+ if (req->nbufs > iovmax)
+ req->nbufs = iovmax;
+
if (req->off < 0) {
if (req->nbufs == 1)
result = read(req->file, req->bufs[0].base, req->bufs[0].len);
@@ -309,6 +317,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
}
done:
+ /* Early cleanup of bufs allocation, since we're done with it. */
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+
+ req->bufs = NULL;
+ req->nbufs = 0;
+
return result;
}
@@ -1023,8 +1038,7 @@ static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
return offset;
}
-typedef ssize_t (*uv__fs_buf_iter_processor)(uv_fs_t* req);
-static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process) {
+static ssize_t uv__fs_write_all(uv_fs_t* req) {
unsigned int iovmax;
unsigned int nbufs;
uv_buf_t* bufs;
@@ -1042,7 +1056,7 @@ static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process)
req->nbufs = iovmax;
do
- result = process(req);
+ result = uv__fs_write(req);
while (result < 0 && errno == EINTR);
if (result <= 0) {
@@ -1076,7 +1090,8 @@ static void uv__fs_work(struct uv__work* w) {
ssize_t r;
req = container_of(w, uv_fs_t, work_req);
- retry_on_eintr = !(req->fs_type == UV_FS_CLOSE);
+ retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
+ req->fs_type == UV_FS_READ);
do {
errno = 0;
@@ -1105,7 +1120,7 @@ static void uv__fs_work(struct uv__work* w) {
X(MKDIR, mkdir(req->path, req->mode));
X(MKDTEMP, uv__fs_mkdtemp(req));
X(OPEN, uv__fs_open(req));
- X(READ, uv__fs_buf_iter(req, uv__fs_read));
+ X(READ, uv__fs_read(req));
X(SCANDIR, uv__fs_scandir(req));
X(READLINK, uv__fs_readlink(req));
X(REALPATH, uv__fs_realpath(req));
@@ -1116,7 +1131,7 @@ static void uv__fs_work(struct uv__work* w) {
X(SYMLINK, symlink(req->path, req->new_path));
X(UNLINK, unlink(req->path));
X(UTIME, uv__fs_utime(req));
- X(WRITE, uv__fs_buf_iter(req, uv__fs_write));
+ X(WRITE, uv__fs_write_all(req));
default: abort();
}
#undef X
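
For contrast with the read path, here is a simplified sketch (plain POSIX
writev(), not the actual uv__fs_write_all code) of the write-side contract
the commit message describes: writes must land in order, so the worker
keeps issuing syscalls, advancing past consumed buffers, until everything
is written. The write_all helper and its return convention are assumptions
that only roughly mirror the patch's behavior.

#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

/* Hypothetical helper: keep issuing writev() until every buffer is
 * consumed, retrying interrupted calls. Mutates the caller's iovec
 * array to track progress. */
static ssize_t write_all(int fd, struct iovec* iov, int iovcnt) {
  ssize_t total = 0;

  while (iovcnt > 0) {
    ssize_t r;

    do
      r = writev(fd, iov, iovcnt);
    while (r < 0 && errno == EINTR);  /* writes retry EINTR internally */

    if (r < 0)
      return total > 0 ? total : -1;  /* report partial progress if any */

    total += r;

    /* Skip the buffers the kernel consumed entirely... */
    while (iovcnt > 0 && (size_t) r >= iov->iov_len) {
      r -= (ssize_t) iov->iov_len;
      iov++;
      iovcnt--;
    }

    /* ...and trim a partially written one before the next syscall. */
    if (iovcnt > 0) {
      iov->iov_base = (char*) iov->iov_base + r;
      iov->iov_len -= (size_t) r;
    }
  }

  return total;
}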