daemon: Fix read-file so it fails gracefully for large files (RHBZ#589039).

Pengzhen Cao noticed that read-file would fail for files
larger than the maximum protocol message size; that failure itself
is *not* the bug.  The bug is that, after failing, the daemon would
also lose protocol synchronization.

The root cause is that functions declared as returning RBufferOut in
the generator must not write to the *size_r out-parameter on any
error return path.

I fixed read-file and initrd-cat, and I checked that pread was
doing the right thing.

This also adds regression tests for read-file with various categories
of large file.
This commit is contained in:
Richard Jones
2010-05-07 13:30:42 +01:00
parent a84f136049
commit 42f59b28f1
3 changed files with 32 additions and 9 deletions

View File

@@ -336,25 +336,24 @@ do_read_file (const char *path, size_t *size_r)
return NULL;
}
*size_r = statbuf.st_size;
/* The actual limit on messages is smaller than this. This
* check just limits the amount of memory we'll try and allocate
* here. If the message is larger than the real limit, that will
* be caught later when we try to serialize the message.
*/
if (*size_r >= GUESTFS_MESSAGE_MAX) {
if (statbuf.st_size >= GUESTFS_MESSAGE_MAX) {
reply_with_error ("%s: file is too large for the protocol, use guestfs_download instead", path);
close (fd);
return NULL;
}
r = malloc (*size_r);
r = malloc (statbuf.st_size);
if (r == NULL) {
reply_with_perror ("malloc");
close (fd);
return NULL;
}
if (xread (fd, r, *size_r) == -1) {
if (xread (fd, r, statbuf.st_size) == -1) {
reply_with_perror ("read: %s", path);
close (fd);
free (r);
@@ -367,6 +366,10 @@ do_read_file (const char *path, size_t *size_r)
return NULL;
}
/* Mustn't touch *size_r until we are sure that we won't return any
* error (RHBZ#589039).
*/
*size_r = statbuf.st_size;
return r;
}
@@ -418,6 +421,9 @@ do_pread (const char *path, int count, int64_t offset, size_t *size_r)
return NULL;
}
/* Mustn't touch *size_r until we are sure that we won't return any
* error (RHBZ#589039).
*/
*size_r = r;
return buf;
}

View File

@@ -142,25 +142,24 @@ do_initrd_cat (const char *path, const char *filename, size_t *size_r)
goto cleanup;
}
*size_r = statbuf.st_size;
/* The actual limit on messages is smaller than this. This
* check just limits the amount of memory we'll try and allocate
* here. If the message is larger than the real limit, that will
* be caught later when we try to serialize the message.
*/
if (*size_r >= GUESTFS_MESSAGE_MAX) {
if (statbuf.st_size >= GUESTFS_MESSAGE_MAX) {
reply_with_error ("%s:%s: file is too large for the protocol",
path, filename);
goto cleanup;
}
ret = malloc (*size_r);
ret = malloc (statbuf.st_size);
if (ret == NULL) {
reply_with_perror ("malloc");
goto cleanup;
}
if (xread (fd, ret, *size_r) == -1) {
if (xread (fd, ret, statbuf.st_size) == -1) {
reply_with_perror ("read: %s:%s", path, filename);
free (ret);
ret = NULL;
@@ -175,6 +174,11 @@ do_initrd_cat (const char *path, const char *filename, size_t *size_r)
}
fd = -1;
/* Mustn't touch *size_r until we are sure that we won't return any
* error (RHBZ#589039).
*/
*size_r = statbuf.st_size;
cleanup:
if (fd >= 0)
close (fd);

View File

@@ -3287,7 +3287,20 @@ for full details.");
("read_file", (RBufferOut "content", [Pathname "path"]), 150, [ProtocolLimitWarning],
[InitISOFS, Always, TestOutputBuffer (
[["read_file"; "/known-4"]], "abc\ndef\nghi")],
[["read_file"; "/known-4"]], "abc\ndef\nghi");
(* Test various near large, large and too large files (RHBZ#589039). *)
InitBasicFS, Always, TestLastFail (
[["touch"; "/a"];
["truncate_size"; "/a"; "4194303"]; (* GUESTFS_MESSAGE_MAX - 1 *)
["read_file"; "/a"]]);
InitBasicFS, Always, TestLastFail (
[["touch"; "/a"];
["truncate_size"; "/a"; "4194304"]; (* GUESTFS_MESSAGE_MAX *)
["read_file"; "/a"]]);
InitBasicFS, Always, TestLastFail (
[["touch"; "/a"];
["truncate_size"; "/a"; "41943040"]; (* GUESTFS_MESSAGE_MAX * 10 *)
["read_file"; "/a"]])],
"read a file",
"\
This calls returns the contents of the file C<path> as a