Partial rewrite of virt-df to use parallel threads instead of single appliance.

Previously when you asked virt-df to show stats for all your libvirt
guests, it used a single appliance and tried to attach as many disks
as possible to it, even disks from different guests.

However this has been problematic: Not only is the code to do this
horrendously complex, but it's also insecure, and it doesn't interact
well with sVirt labels (see RHBZ#912499 comment 7).

In the meantime we discovered that running parallel appliances gives
you most of the performance of using a single appliance, but with a
lot less complexity and better guest isolation (see the documentation
in commit 680450f3b4).

Therefore this is a partial rewrite of virt-df so that in this case it
now uses parallel appliances.

Notes:

What the '--one-per-guest' option used to request is now the default and
only behaviour; the option itself is now a no-op, kept for backwards
compatibility.

By default, the amount of parallelism to use is controlled by the
amount of free memory seen when virt-df starts up (subject to some
minima and maxima).  The user can control this through new command
line option '-P'.
This commit is contained in:
Richard W.M. Jones
2013-02-25 18:46:40 +00:00
parent 76266be549
commit 34e77af1bf
11 changed files with 452 additions and 546 deletions

View File

@@ -38,9 +38,14 @@ virt_df_SOURCES = \
$(SHARED_SOURCE_FILES) \
virt-df.h \
domains.c \
domains.h \
df.c \
estimate-max-threads.c \
estimate-max-threads.h \
main.c \
output.c
output.c \
parallel.c \
parallel.h
virt_df_CPPFLAGS = \
-DGUESTFS_WARN_DEPRECATED=1 \
@@ -51,6 +56,7 @@ virt_df_CPPFLAGS = \
-I$(srcdir)/../gnulib/lib -I../gnulib/lib
virt_df_CFLAGS = \
-pthread \
$(WARN_CFLAGS) $(WERROR_CFLAGS) \
$(GPROF_CFLAGS) $(GCOV_CFLAGS) \
$(LIBCONFIG_CFLAGS) \

166
df/df.c
View File

@@ -23,157 +23,93 @@
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#ifdef HAVE_LIBVIRT
#include <libvirt/libvirt.h>
#include <libvirt/virterror.h>
#endif
#include <unistd.h>
#include <error.h>
#include <errno.h>
#include "progname.h"
#include "c-ctype.h"
#include "guestfs.h"
#include "options.h"
#include "domains.h"
#include "virt-df.h"
static void try_df (const char *name, const char *uuid, const char *dev, int offset);
static int find_dev_in_devices (const char *dev, char **devices);
/* Since we want this function to be robust against very bad failure
* cases (hello, https://bugzilla.kernel.org/show_bug.cgi?id=18792) it
* won't exit on guestfs failures.
*/
int
df_on_handle (const char *name, const char *uuid, char **devices, int offset)
df_on_handle (guestfs_h *g, const char *name, const char *uuid, FILE *fp)
{
int ret = -1;
size_t i;
CLEANUP_FREE_STRING_LIST char **devices = NULL;
CLEANUP_FREE_STRING_LIST char **fses = NULL;
int free_devices = 0, is_lv;
if (verbose) {
fprintf (stderr, "df_on_handle %s devices=", name);
if (devices) {
fputc ('[', stderr);
for (i = 0; devices[i] != NULL; ++i) {
if (i > 0)
fputc (' ', stderr);
fputs (devices[i], stderr);
}
fputc (']', stderr);
}
else
fprintf (stderr, "null");
fputc ('\n', stderr);
}
if (verbose)
fprintf (stderr, "df_on_handle: %s\n", name);
if (devices == NULL) {
devices = guestfs_list_devices (g);
if (devices == NULL)
goto cleanup;
free_devices = 1;
} else {
/* Mask LVM for just the devices in the set. */
if (guestfs_lvm_set_filter (g, devices) == -1)
goto cleanup;
}
devices = guestfs_list_devices (g);
if (devices == NULL)
return -1;
/* list-filesystems will return filesystems on every device ... */
fses = guestfs_list_filesystems (g);
if (fses == NULL)
goto cleanup;
return -1;
/* ... so we need to filter out only the devices we are interested in. */
for (i = 0; fses[i] != NULL; i += 2) {
if (STRNEQ (fses[i+1], "") &&
STRNEQ (fses[i+1], "swap") &&
STRNEQ (fses[i+1], "unknown")) {
is_lv = guestfs_is_lv (g, fses[i]);
if (is_lv > 0) /* LVs are OK because of the LVM filter */
try_df (name, uuid, fses[i], -1);
else if (is_lv == 0) {
if (find_dev_in_devices (fses[i], devices))
try_df (name, uuid, fses[i], offset);
const char *dev = fses[i];
CLEANUP_FREE_STATVFS struct guestfs_statvfs *stat = NULL;
if (verbose)
fprintf (stderr, "df_on_handle: %s dev %s\n", name, dev);
/* Try mounting and stating the device. This might reasonably
* fail, so don't show errors.
*/
guestfs_push_error_handler (g, NULL, NULL);
if (guestfs_mount_ro (g, dev, "/") == 0) {
stat = guestfs_statvfs (g, "/");
guestfs_umount_all (g);
}
guestfs_pop_error_handler (g);
if (stat)
print_stat (fp, name, uuid, dev, stat);
}
}
ret = 0;
cleanup:
if (free_devices) {
for (i = 0; devices[i] != NULL; ++i)
free (devices[i]);
free (devices);
}
return ret;
return 0;
}
/* dev is a device or partition name such as "/dev/sda" or "/dev/sda1".
* See if dev occurs somewhere in the list of devices.
#if defined(HAVE_LIBVIRT)
/* The multi-threaded version. This callback is called from the code
* in "parallel.c".
*/
static int
find_dev_in_devices (const char *dev, char **devices)
void
df_work (guestfs_h *g, size_t i, FILE *fp)
{
size_t i, len;
char *whole_disk;
int free_whole_disk;
int ret = 0;
struct guestfs___add_libvirt_dom_argv optargs;
/* Convert 'dev' to a whole disk name. */
len = strlen (dev);
if (len > 0 && c_isdigit (dev[len-1])) {
guestfs_push_error_handler (g, NULL, NULL);
optargs.bitmask =
GUESTFS___ADD_LIBVIRT_DOM_READONLY_BITMASK |
GUESTFS___ADD_LIBVIRT_DOM_READONLYDISK_BITMASK;
optargs.readonly = 1;
optargs.readonlydisk = "read";
whole_disk = guestfs_part_to_dev (g, dev);
if (guestfs___add_libvirt_dom (g, domains[i].dom, &optargs) == -1)
return;
guestfs_pop_error_handler (g);
if (guestfs_launch (g) == -1)
return;
if (!whole_disk) /* probably an MD device or similar */
return 0;
free_whole_disk = 1;
}
else {
whole_disk = (char *) dev;
free_whole_disk = 0;
}
for (i = 0; devices[i] != NULL; ++i) {
if (STREQ (whole_disk, devices[i])) {
ret = 1;
break;
}
}
if (free_whole_disk)
free (whole_disk);
return ret;
(void) df_on_handle (g, domains[i].name, domains[i].uuid, fp);
}
static void
try_df (const char *name, const char *uuid,
const char *dev, int offset)
{
CLEANUP_FREE_STATVFS struct guestfs_statvfs *stat = NULL;
if (verbose)
fprintf (stderr, "try_df %s %s %d\n", name, dev, offset);
/* Try mounting and stating the device. This might reasonably fail,
* so don't show errors.
*/
guestfs_push_error_handler (g, NULL, NULL);
if (guestfs_mount_ro (g, dev, "/") == 0) {
stat = guestfs_statvfs (g, "/");
guestfs_umount_all (g);
}
guestfs_pop_error_handler (g);
if (stat)
print_stat (name, uuid, dev, offset, stat);
}
#endif /* HAVE_LIBVIRT */

View File

@@ -31,30 +31,14 @@
#include "progname.h"
#include "guestfs.h"
#include "options.h"
#include "virt-df.h"
#include "guestfs-internal-frontend.h"
#include "domains.h"
#if defined(HAVE_LIBVIRT) && defined(HAVE_LIBXML2)
/* The list of domains and disks that we build up in
* get_domains_from_libvirt.
*/
struct disk {
struct disk *next;
char *filename;
char *format; /* could be NULL */
int failed; /* flag if disk failed when adding */
};
struct domain {
char *name;
char *uuid;
struct disk *disks;
size_t nr_disks;
};
#if defined(HAVE_LIBVIRT)
virConnectPtr conn = NULL;
struct domain *domains = NULL;
size_t nr_domains;
size_t nr_domains = 0;
static int
compare_domain_names (const void *p1, const void *p2)
@@ -65,43 +49,33 @@ compare_domain_names (const void *p1, const void *p2)
return strcmp (d1->name, d2->name);
}
static void
free_domain (struct domain *domain)
void
free_domains (void)
{
struct disk *disk, *next;
size_t i;
for (disk = domain->disks; disk; disk = next) {
next = disk->next;
free (disk->filename);
free (disk->format);
free (disk);
for (i = 0; i < nr_domains; ++i) {
free (domains[i].name);
free (domains[i].uuid);
virDomainFree (domains[i].dom);
}
free (domain->name);
free (domain->uuid);
free (domains);
if (conn)
virConnectClose (conn);
}
static void add_domains_by_id (virConnectPtr conn, int *ids, size_t n, size_t max_disks);
static void add_domains_by_name (virConnectPtr conn, char **names, size_t n, size_t max_disks);
static void add_domain (virDomainPtr dom, size_t max_disks);
static int add_disk (guestfs_h *g, const char *filename, const char *format, int readonly, void *domain_vp);
static void multi_df (struct domain *, size_t n, size_t *errors);
static void add_domains_by_id (virConnectPtr conn, int *ids, size_t n);
static void add_domains_by_name (virConnectPtr conn, char **names, size_t n);
static void add_domain (virDomainPtr dom);
void
get_domains_from_libvirt (void)
get_all_libvirt_domains (const char *libvirt_uri)
{
virErrorPtr err;
virConnectPtr conn;
int n, r;
size_t i, j, nr_disks_added, errors, max_disks;
r = guestfs_max_disks (g);
if (r == -1)
exit (EXIT_FAILURE);
max_disks = (size_t) r;
nr_domains = 0;
domains = NULL;
int n;
size_t i;
/* Get the list of all domains. */
conn = virConnectOpenReadOnly (libvirt_uri);
@@ -132,7 +106,7 @@ get_domains_from_libvirt (void)
exit (EXIT_FAILURE);
}
add_domains_by_id (conn, ids, n, max_disks);
add_domains_by_id (conn, ids, n);
n = virConnectNumOfDefinedDomains (conn);
if (n == -1) {
@@ -153,7 +127,7 @@ get_domains_from_libvirt (void)
exit (EXIT_FAILURE);
}
add_domains_by_name (conn, names, n, max_disks);
add_domains_by_name (conn, names, n);
/* You must free these even though the libvirt documentation doesn't
* mention it.
@@ -161,57 +135,16 @@ get_domains_from_libvirt (void)
for (i = 0; i < (size_t) n; ++i)
free (names[i]);
virConnectClose (conn);
/* No domains? */
if (nr_domains == 0)
return;
/* Sort the domains alphabetically by name for display. */
qsort (domains, nr_domains, sizeof (struct domain), compare_domain_names);
print_title ();
/* To minimize the number of times we have to launch the appliance,
* shuffle as many domains together as we can, but not exceeding
* max_disks per request. If --one-per-guest was requested then only
* request disks from a single guest each time.
* Interesting application for NP-complete knapsack problem here.
*/
errors = 0;
if (one_per_guest) {
for (i = 0; i < nr_domains; ++i)
multi_df (&domains[i], 1, &errors);
} else {
for (i = 0; i < nr_domains; /**/) {
nr_disks_added = 0;
/* Make a request with domains [i..j-1]. */
for (j = i; j < nr_domains; ++j) {
if (nr_disks_added + domains[j].nr_disks > max_disks)
break;
nr_disks_added += domains[j].nr_disks;
}
multi_df (&domains[i], j-i, &errors);
i = j;
}
}
/* Free up domains structure. */
for (i = 0; i < nr_domains; ++i)
free_domain (&domains[i]);
free (domains);
if (errors > 0) {
fprintf (stderr, _("%s: failed to analyze a disk, see error(s) above\n"),
program_name);
exit (EXIT_FAILURE);
}
}
static void
add_domains_by_id (virConnectPtr conn, int *ids, size_t n, size_t max_disks)
add_domains_by_id (virConnectPtr conn, int *ids, size_t n)
{
size_t i;
virDomainPtr dom;
@@ -219,32 +152,27 @@ add_domains_by_id (virConnectPtr conn, int *ids, size_t n, size_t max_disks)
for (i = 0; i < n; ++i) {
if (ids[i] != 0) { /* RHBZ#538041 */
dom = virDomainLookupByID (conn, ids[i]);
if (dom) { /* transient errors are possible here, ignore them */
add_domain (dom, max_disks);
virDomainFree (dom);
}
if (dom) /* transient errors are possible here, ignore them */
add_domain (dom);
}
}
}
static void
add_domains_by_name (virConnectPtr conn, char **names, size_t n,
size_t max_disks)
add_domains_by_name (virConnectPtr conn, char **names, size_t n)
{
size_t i;
virDomainPtr dom;
for (i = 0; i < n; ++i) {
dom = virDomainLookupByName (conn, names[i]);
if (dom) { /* transient errors are possible here, ignore them */
add_domain (dom, max_disks);
virDomainFree (dom);
}
if (dom) /* transient errors are possible here, ignore them */
add_domain (dom);
}
}
static void
add_domain (virDomainPtr dom, size_t max_disks)
add_domain (virDomainPtr dom)
{
struct domain *domain;
@@ -257,6 +185,8 @@ add_domain (virDomainPtr dom, size_t max_disks)
domain = &domains[nr_domains];
nr_domains++;
domain->dom = dom;
domain->name = strdup (virDomainGetName (dom));
if (domain->name == NULL) {
perror ("strdup");
@@ -273,193 +203,6 @@ add_domain (virDomainPtr dom, size_t max_disks)
}
else
domain->uuid = NULL;
domain->disks = NULL;
int n = guestfs___for_each_disk (g, dom, add_disk, domain, NULL);
if (n == -1)
exit (EXIT_FAILURE);
domain->nr_disks = n;
if (domain->nr_disks > max_disks) {
fprintf (stderr,
_("%s: ignoring %s, it has too many disks (%zu > %zu)\n"),
program_name, domain->name, domain->nr_disks, max_disks);
free_domain (domain);
nr_domains--;
return;
}
}
static int
add_disk (guestfs_h *g,
const char *filename, const char *format, int readonly,
void *domain_vp)
{
struct domain *domain = domain_vp;
struct disk *disk;
disk = calloc (1, sizeof *disk);
if (disk == NULL) {
perror ("malloc");
return -1;
}
disk->next = domain->disks;
domain->disks = disk;
disk->filename = strdup (filename);
if (disk->filename == NULL) {
perror ("malloc");
return -1;
}
if (format) {
disk->format = strdup (format);
if (disk->format == NULL) {
perror ("malloc");
return -1;
}
}
else
disk->format = NULL;
return 0;
}
static void reset_guestfs_handle (void);
static size_t add_disks_to_handle_reverse (struct disk *disk, size_t *errors_r);
static size_t count_non_failed_disks (struct disk *disk);
static char **duplicate_first_n (char **, size_t n);
/* Perform 'df' operation on the domain(s) given in the list. */
static void
multi_df (struct domain *domains, size_t n, size_t *errors_r)
{
size_t i;
size_t nd;
size_t count;
int r;
CLEANUP_FREE_STRING_LIST char **devices = NULL;
char **domain_devices;
/* Add all the disks to the handle (since they were added in reverse
* order, we must add them here in reverse too).
*/
for (i = 0, count = 0; i < n; ++i)
count += add_disks_to_handle_reverse (domains[i].disks, errors_r);
if (count == 0)
return;
/* Launch the handle. */
if (guestfs_launch (g) == -1)
exit (EXIT_FAILURE);
devices = guestfs_list_devices (g);
if (devices == NULL)
exit (EXIT_FAILURE);
for (i = 0, nd = 0; i < n; ++i) {
/* Find out how many non-failed disks this domain has. */
count = count_non_failed_disks (domains[i].disks);
if (count == 0)
continue;
/* Duplicate the devices into a separate list for convenience.
* Note this doesn't duplicate the strings themselves.
*/
domain_devices = duplicate_first_n (&devices[nd], count);
r = df_on_handle (domains[i].name, domains[i].uuid, domain_devices, nd);
nd += count;
free (domain_devices);
/* Something broke in df_on_handle. Give up on the remaining
* devices for this handle, but keep going on the next handle.
*/
if (r == -1) {
(*errors_r)++;
break;
}
}
/* Reset the handle. */
reset_guestfs_handle ();
}
static size_t
add_disks_to_handle_reverse (struct disk *disk, size_t *errors_r)
{
size_t nr_disks_added;
if (disk == NULL)
return 0;
nr_disks_added = add_disks_to_handle_reverse (disk->next, errors_r);
struct guestfs_add_drive_opts_argv optargs = { .bitmask = 0 };
optargs.bitmask |= GUESTFS_ADD_DRIVE_OPTS_READONLY_BITMASK;
optargs.readonly = 1;
if (disk->format) {
optargs.bitmask |= GUESTFS_ADD_DRIVE_OPTS_FORMAT_BITMASK;
optargs.format = disk->format;
}
if (guestfs_add_drive_opts_argv (g, disk->filename, &optargs) == -1) {
(*errors_r)++;
disk->failed = 1;
return nr_disks_added;
}
return nr_disks_added+1;
}
/* Close and reopen the libguestfs handle. */
static void
reset_guestfs_handle (void)
{
/* Copy the settings from the old handle. */
int verbose = guestfs_get_verbose (g);
int trace = guestfs_get_trace (g);
guestfs_close (g);
g = guestfs_create ();
if (g == NULL) {
fprintf (stderr, _("guestfs_create: failed to create handle\n"));
exit (EXIT_FAILURE);
}
guestfs_set_verbose (g, verbose);
guestfs_set_trace (g, trace);
}
static size_t
count_non_failed_disks (struct disk *disk)
{
if (disk == NULL)
return 0;
else if (disk->failed)
return count_non_failed_disks (disk->next);
else
return 1 + count_non_failed_disks (disk->next);
}
static char **
duplicate_first_n (char **strs, size_t n)
{
char **ret;
ret = malloc ((n+1) * sizeof (char *));
if (ret == NULL) {
perror ("malloc");
exit (EXIT_FAILURE);
}
memcpy (ret, strs, n * sizeof (char *));
ret[n] = NULL;
return ret;
}
#endif
#endif /* HAVE_LIBVIRT */

45
df/domains.h Normal file
View File

@@ -0,0 +1,45 @@
/* virt-df & virt-alignment-scan domains code.
* Copyright (C) 2010-2013 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef GUESTFS_DOMAINS_H_
#define GUESTFS_DOMAINS_H_
#if defined(HAVE_LIBVIRT)
/* The list of domains that we build up in get_all_libvirt_domains. */
/* One libvirt guest.  The array of these ('domains') is released by
 * free_domains.
 */
struct domain {
  virDomainPtr dom;             /* libvirt domain object; released with
                                 * virDomainFree in free_domains */
  char *name;                   /* domain name (strdup'd) */
  char *uuid;                   /* printable UUID, or NULL if it could
                                 * not be fetched */
};
extern struct domain *domains;
extern size_t nr_domains;
/* Frees up everything used by 'domains.c'. */
extern void free_domains (void);
/* Read all libvirt guests into the global variables 'domains' and
* 'nr_domains'. The guests are ordered by name. This exits on any
* error.
*/
extern void get_all_libvirt_domains (const char *libvirt_uri);
#endif /* HAVE_LIBVIRT */
#endif /* GUESTFS_DOMAINS_H_ */

View File

@@ -38,6 +38,8 @@
#include "guestfs.h"
#include "options.h"
#include "domains.h"
#include "parallel.h"
#include "virt-df.h"
/* These globals are shared with options.c. */
@@ -54,7 +56,6 @@ int inspector = 0;
int csv = 0; /* --csv */
int human = 0; /* --human-readable|-h */
int inodes = 0; /* --inodes */
int one_per_guest = 0; /* --one-per-guest */
int uuid = 0; /* --uuid */
static char *make_display_name (struct drv *drvs);
@@ -82,6 +83,7 @@ usage (int status)
" --help Display brief help\n"
" -i|--inodes Display inodes\n"
" --one-per-guest Separate appliance per guest\n"
" -P nr_threads Use at most nr_threads\n"
" --uuid Add UUIDs to --long output\n"
" -v|--verbose Verbose messages\n"
" -V|--version Display version and exit\n"
@@ -105,7 +107,7 @@ main (int argc, char *argv[])
enum { HELP_OPTION = CHAR_MAX + 1 };
static const char *options = "a:c:d:hivVx";
static const char *options = "a:c:d:hiP:vVx";
static const struct option long_options[] = {
{ "add", 1, 0, 'a' },
{ "connect", 1, 0, 'c' },
@@ -126,6 +128,7 @@ main (int argc, char *argv[])
const char *format = NULL;
int c;
int option_index;
size_t max_threads = 0;
g = guestfs_create ();
if (g == NULL) {
@@ -149,7 +152,7 @@ main (int argc, char *argv[])
} else if (STREQ (long_options[option_index].name, "csv")) {
csv = 1;
} else if (STREQ (long_options[option_index].name, "one-per-guest")) {
one_per_guest = 1;
/* nothing - left for backwards compatibility */
} else if (STREQ (long_options[option_index].name, "uuid")) {
uuid = 1;
} else {
@@ -179,6 +182,13 @@ main (int argc, char *argv[])
inodes = 1;
break;
case 'P':
if (sscanf (optarg, "%zu", &max_threads) != 1) {
fprintf (stderr, _("%s: -P option is not numeric\n"), program_name);
exit (EXIT_FAILURE);
}
break;
case 'v':
OPTION_v;
break;
@@ -252,19 +262,24 @@ main (int argc, char *argv[])
exit (EXIT_FAILURE);
}
/* If the user didn't specify any drives, then we ask libvirt for
* the full list of guests and drives, which we add in batches.
/* virt-df has two modes. If the user didn't specify any drives,
* then we do the df on every libvirt guest. That's the if-clause
* below. If the user specified domains/drives, then we assume they
* belong to a single guest. That's the else-clause below.
*/
if (drvs == NULL) {
#if defined(HAVE_LIBVIRT) && defined(HAVE_LIBXML2)
get_domains_from_libvirt ();
#if defined(HAVE_LIBVIRT)
get_all_libvirt_domains (libvirt_uri);
print_title ();
start_threads (max_threads, g, df_work);
free_domains ();
#else
fprintf (stderr, _("%s: compiled without support for libvirt and/or libxml2.\n"),
fprintf (stderr, _("%s: compiled without support for libvirt.\n"),
program_name);
exit (EXIT_FAILURE);
#endif
}
else {
else { /* Single guest. */
CLEANUP_FREE char *name = NULL;
/* Add domains/drives from the command line (for a single guest). */
@@ -284,7 +299,7 @@ main (int argc, char *argv[])
* guestfs_add_domain so the UUID is not available easily for
* single '-d' command-line options.
*/
(void) df_on_handle (name, NULL, NULL, 0);
(void) df_on_handle (g, name, NULL, stdout);
/* Free up data structures, no longer needed after this point. */
free_drives (drvs);

View File

@@ -41,7 +41,7 @@
#include "options.h"
#include "virt-df.h"
static void write_csv_field (const char *field);
static void write_csv_field (FILE *fp, const char *field);
void
print_title (void)
@@ -76,17 +76,16 @@ print_title (void)
for (i = 0; i < 6; ++i) {
if (i > 0)
putchar (',');
write_csv_field (cols[i]);
write_csv_field (stdout, cols[i]);
}
putchar ('\n');
}
}
static char *adjust_device_offset (const char *device, int offset);
void
print_stat (const char *name, const char *uuid_param,
const char *dev_param, int offset,
print_stat (FILE *fp,
const char *name, const char *uuid_param,
const char *dev_param,
const struct guestfs_statvfs *stat)
{
/* First two columns are always 'name' and 'dev', followed by four
@@ -104,15 +103,10 @@ print_stat (const char *name, const char *uuid_param,
size_t i, len;
char *dev;
/* Make a canonical name, adjusting the device offset if necessary. */
/* Make a canonical name. */
dev = guestfs_canonical_device_name (g, dev_param);
if (!dev)
exit (EXIT_FAILURE);
if (offset >= 0) {
char *p = dev;
dev = adjust_device_offset (p, offset);
free (p);
}
if (!inodes) { /* 1K blocks */
if (!human) {
@@ -172,27 +166,27 @@ print_stat (const char *name, const char *uuid_param,
if (!csv) {
len = strlen (name) + strlen (dev) + 1;
printf ("%s:%s", name, dev);
fprintf (fp, "%s:%s", name, dev);
if (len <= 36) {
for (i = len; i < 36; ++i)
putchar (' ');
fputc (' ', fp);
} else {
printf ("\n ");
fprintf (fp, "\n ");
}
printf ("%10s %10s %10s %5s\n", cols[0], cols[1], cols[2], cols[3]);
fprintf (fp, "%10s %10s %10s %5s\n", cols[0], cols[1], cols[2], cols[3]);
}
else {
write_csv_field (name);
putchar (',');
write_csv_field (dev);
write_csv_field (fp, name);
fputc (',', fp);
write_csv_field (fp, dev);
for (i = 0; i < 4; ++i) {
putchar (',');
write_csv_field (cols[i]);
fputc (',', fp);
write_csv_field (fp, cols[i]);
}
putchar ('\n');
fputc ('\n', fp);
}
free (dev);
@@ -202,7 +196,7 @@ print_stat (const char *name, const char *uuid_param,
* external module.
*/
static void
write_csv_field (const char *field)
write_csv_field (FILE *fp, const char *field)
{
size_t i, len;
int needs_quoting = 0;
@@ -218,87 +212,18 @@ write_csv_field (const char *field)
}
if (!needs_quoting) {
printf ("%s", field);
fprintf (fp, "%s", field);
return;
}
/* Quoting for CSV fields. */
putchar ('"');
fputc ('"', fp);
for (i = 0; i < len; ++i) {
if (field[i] == '"') {
putchar ('"');
putchar ('"');
fputc ('"', fp);
fputc ('"', fp);
} else
putchar (field[i]);
fputc (field[i], fp);
}
putchar ('"');
}
static char *drive_name (int index, char *ret);
static char *
adjust_device_offset (const char *device, int offset)
{
int index;
int part_num;
char *whole_device;
int free_whole_device;
size_t len;
char *ret;
/* Could be a whole disk or a partition. guestfs_device_index will
* only work with the whole disk name.
*/
len = strlen (device);
if (len > 0 && c_isdigit (device[len-1])) {
whole_device = guestfs_part_to_dev (g, device);
if (whole_device == NULL)
exit (EXIT_FAILURE);
free_whole_device = 1;
part_num = guestfs_part_to_partnum (g, device);
if (part_num == -1)
exit (EXIT_FAILURE);
} else {
whole_device = (char *) device;
free_whole_device = 0;
part_num = 0;
}
index = guestfs_device_index (g, whole_device);
if (index == -1)
exit (EXIT_FAILURE);
if (free_whole_device)
free (whole_device);
assert (index >= offset);
index -= offset;
/* Construct the final device name. */
ret = malloc (128);
if (!ret) {
perror ("malloc");
exit (EXIT_FAILURE);
}
strcpy (ret, "/dev/sd");
drive_name (index, &ret[7]);
len = strlen (ret);
if (part_num > 0)
snprintf (&ret[len], 128-len, "%d", part_num);
return ret;
}
/* https://rwmj.wordpress.com/2011/01/09/how-are-linux-drives-named-beyond-drive-26-devsdz/ */
static char *
drive_name (int index, char *ret)
{
if (index >= 26)
ret = drive_name (index/26 - 1, ret);
index %= 26;
*ret++ = 'a' + index;
*ret = '\0';
return ret;
fputc ('"', fp);
}

204
df/parallel.c Normal file
View File

@@ -0,0 +1,204 @@
/* virt-df
* Copyright (C) 2013 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libintl.h>
#include <error.h>
#include <assert.h>
#include <pthread.h>
#ifdef HAVE_LIBVIRT
#include <libvirt/libvirt.h>
#include <libvirt/virterror.h>
#endif
#include "progname.h"
#include "guestfs.h"
#include "guestfs-internal-frontend.h"
#include "options.h"
#include "domains.h"
#include "estimate-max-threads.h"
#include "parallel.h"
#define DEBUG_PARALLEL 0
#if defined(HAVE_LIBVIRT)
/* Maximum number of threads we would ever run. Note this should not
* be > 20, unless libvirt is modified to increase the maximum number
* of clients.
*/
#define MAX_THREADS 12
/* The worker threads take domains off the 'domains' global list until
* 'next_domain_to_take' reaches 'nr_domains'.
*
* The worker threads retire domains in numerical order, using the
* 'next_domain_to_retire' number.
*
* 'next_domain_to_take' is protected just by a mutex.
* 'next_domain_to_retire' is protected by a mutex and condition.
*/
static size_t next_domain_to_take = 0;
static pthread_mutex_t take_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t next_domain_to_retire = 0;
static pthread_mutex_t retire_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t retire_cond = PTHREAD_COND_INITIALIZER;
static void thread_failure (const char *fn, int err) __attribute__((noreturn));
static void *worker_thread (void *arg);
struct thread_data {
guestfs_h *options_handle;
work_fn work;
};
/* Create the worker pool and block until every domain on the global
 * 'domains' list has been processed by the 'work' callback.
 *
 * 'option_P' is the user's -P value (0 = choose automatically);
 * 'options_handle' (may be NULL) supplies trace/verbose settings that
 * each worker copies into its own guestfs handle.
 */
void
start_threads (size_t option_P, guestfs_h *options_handle, work_fn work)
{
  struct thread_data data;
  size_t t, nr_threads;
  int rc;
  void *retval;

  data.options_handle = options_handle;
  data.work = work;

  if (nr_domains == 0)
    return;                     /* No guests, so nothing to do. */

  /* Degree of parallelism: the user's -P value if given, otherwise a
   * free-memory heuristic capped at MAX_THREADS.  Never start more
   * threads than there are domains.
   */
  if (option_P == 0)
    nr_threads = MIN (nr_domains, MIN (MAX_THREADS, estimate_max_threads ()));
  else
    nr_threads = MIN (nr_domains, option_P);

  pthread_t workers[nr_threads];

  /* Start the worker threads. */
  for (t = 0; t < nr_threads; ++t) {
    rc = pthread_create (&workers[t], NULL, worker_thread, &data);
    if (rc != 0)
      error (EXIT_FAILURE, rc, "pthread_create [%zu]", t);
  }

  /* Wait for all workers to finish. */
  for (t = 0; t < nr_threads; ++t) {
    rc = pthread_join (workers[t], &retval);
    if (rc != 0)
      error (EXIT_FAILURE, rc, "pthread_join [%zu]", t);
  }
}
/* Worker thread.
 *
 * Loop: atomically claim the next unprocessed domain index, run the
 * per-domain work function against a fresh libguestfs handle with all
 * output buffered in memory, then print that buffer only once every
 * lower-numbered domain has been printed.  This keeps the combined
 * output in domain order even though the work runs in parallel.
 */
static void *
worker_thread (void *thread_data_vp)
{
  struct thread_data *thread_data = thread_data_vp;

  while (1) {
    size_t i;                   /* The current domain we're working on. */
    FILE *fp;                   /* memstream backing 'output' */
    CLEANUP_FREE char *output = NULL;
    size_t output_len = 0;
    guestfs_h *g;
    int err;

    /* Take the next domain from the list. */
    err = pthread_mutex_lock (&take_mutex);
    if (err != 0) thread_failure ("pthread_mutex_lock", err);
    i = next_domain_to_take++;
    err = pthread_mutex_unlock (&take_mutex);
    if (err != 0) thread_failure ("pthread_mutex_unlock", err);

    if (i >= nr_domains)        /* Work finished. */
      break;

    if (DEBUG_PARALLEL)
      printf ("thread taking domain %zu\n", i);

    /* Buffer the work function's output in memory so it can be
     * emitted atomically, and in order, at retirement time.
     */
    fp = open_memstream (&output, &output_len);
    if (fp == NULL) {
      perror ("open_memstream");
      /* _exit: don't run atexit handlers from a broken thread. */
      _exit (EXIT_FAILURE);
    }

    /* Create a guestfs handle. */
    g = guestfs_create ();
    if (g == NULL) {
      perror ("guestfs_create");
      _exit (EXIT_FAILURE);
    }

    /* Copy some settings from the options guestfs handle. */
    if (thread_data->options_handle) {
      guestfs_set_trace (g, guestfs_get_trace (thread_data->options_handle));
      guestfs_set_verbose (g,
                           guestfs_get_verbose (thread_data->options_handle));
    }

    /* Do work. */
    thread_data->work (g, i, fp);

    /* fclose flushes the memstream, making 'output' valid. */
    fclose (fp);
    guestfs_close (g);

    /* Retire this domain.  We have to retire domains in order, which
     * may mean waiting for another thread to finish here.
     */
    err = pthread_mutex_lock (&retire_mutex);
    if (err != 0) thread_failure ("pthread_mutex_lock", err);
    while (next_domain_to_retire != i) {
      err = pthread_cond_wait (&retire_cond, &retire_mutex);
      if (err != 0) thread_failure ("pthread_cond_wait", err);
    }

    if (DEBUG_PARALLEL)
      printf ("thread retiring domain %zu\n", i);

    /* Retire domain. */
    printf ("%s", output);

    /* Update next_domain_to_retire and tell other threads. */
    next_domain_to_retire = i+1;
    pthread_cond_broadcast (&retire_cond);

    err = pthread_mutex_unlock (&retire_mutex);
    if (err != 0) thread_failure ("pthread_mutex_unlock", err);
  }

  if (DEBUG_PARALLEL)
    printf ("thread exiting\n");

  return NULL;
}
/* Report a fatal failure of a pthread primitive and terminate the
 * whole process immediately.  Uses _exit so no atexit/cleanup
 * handlers run from a thread in an inconsistent state.
 */
static void
thread_failure (const char *op, int errnum)
{
  const char *reason = strerror (errnum);

  fprintf (stderr, "%s: %s: %s\n", program_name, op, reason);
  _exit (EXIT_FAILURE);
}
#endif /* HAVE_LIBVIRT */

44
df/parallel.h Normal file
View File

@@ -0,0 +1,44 @@
/* virt-df & virt-alignment-scan parallel appliances code.
* Copyright (C) 2013 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef GUESTFS_PARALLEL_H_
#define GUESTFS_PARALLEL_H_
#if defined(HAVE_LIBVIRT)
#include "domains.h"
/* The work function should do the work (inspecting the domain, etc.)
* on domain index 'i'. However it MUST NOT print out any result
* directly. Instead it prints anything it needs to the supplied
* 'FILE *'.
*/
typedef void (*work_fn) (guestfs_h *g, size_t i, FILE *fp);
/* Run the threads and work through the global list of libvirt
* domains. 'option_P' is whatever the user passed in the '-P'
* option, or 0 if the user didn't use the '-P' option (in which case
the number of threads is chosen heuristically).  'options_handle'
* (which may be NULL) is the global guestfs handle created by the
* options mini-library.
*/
extern void start_threads (size_t option_P, guestfs_h *options_handle, work_fn work);
#endif /* HAVE_LIBVIRT */
#endif /* GUESTFS_PARALLEL_H_ */

View File

@@ -1,5 +1,5 @@
/* virt-df
* Copyright (C) 2010 Red Hat Inc.
* Copyright (C) 2010-2013 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,27 +16,22 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef GUESTFS_VIRT_DF_
#define GUESTFS_VIRT_DF_
#ifndef GUESTFS_VIRT_DF_H_
#define GUESTFS_VIRT_DF_H_
extern guestfs_h *g;
extern const char *libvirt_uri; /* --connect */
extern int csv; /* --csv */
extern int human; /* --human-readable|-h */
extern int inodes; /* --inodes */
extern int one_per_guest; /* --one-per-guest */
extern int uuid; /* --uuid */
/* df.c */
extern int df_on_handle (const char *name, const char *uuid, char **devices, int offset);
/* domains.c */
#if defined(HAVE_LIBVIRT) && defined(HAVE_LIBXML2)
extern void get_domains_from_libvirt (void);
extern int df_on_handle (guestfs_h *g, const char *name, const char *uuid, FILE *fp);
#if defined(HAVE_LIBVIRT)
extern void df_work (guestfs_h *g, size_t i, FILE *fp);
#endif
/* output.c */
extern void print_title (void);
extern void print_stat (const char *name, const char *uuid, const char *dev, int offset, const struct guestfs_statvfs *stat);
extern void print_stat (FILE *fp, const char *name, const char *uuid, const char *dev, const struct guestfs_statvfs *stat);
#endif /* GUESTFS_VIRT_DF_ */
#endif /* GUESTFS_VIRT_DF_H_ */

View File

@@ -152,27 +152,19 @@ Print inodes instead of blocks.
=item B<--one-per-guest>
Run one libguestfs appliance per guest. Normally C<virt-df> will
add the disks from several guests to a single libguestfs appliance.
Since libguestfs 1.22, this is the default. This option does nothing
and is left here for backwards compatibility with older scripts.
You might use this option in the following circumstances:
=item B<-P> nr_threads
=over 4
Since libguestfs 1.22, virt-df is multithreaded and examines guests in
parallel. By default the number of threads to use is chosen based on
the amount of free memory available at the time that virt-df is
started. You can force virt-df to use at most C<nr_threads> by using
the I<-P> option.
=item *
If you think an untrusted guest might actively try to exploit the
libguestfs appliance kernel, then this prevents one guest from
interfering with the stats printed for another guest.
=item *
If the kernel has a bug which stops it from accessing a
filesystem in one guest (see for example RHBZ#635373) then
this allows libguestfs to continue and report stats for further
guests.
=back
Note that I<-P 0> means to autodetect, and I<-P 1> means to use a
single thread.
=item B<--uuid>

View File

@@ -102,6 +102,7 @@ df/domains.c
df/estimate-max-threads.c
df/main.c
df/output.c
df/parallel.c
edit/virt-edit.c
erlang/erl-guestfs-proto.c
erlang/erl-guestfs.c