lib: Remove User-Mode Linux

User-Mode Linux was an alternative hypervisor that could run the
appliance, instead of using qemu.  It had many limitations including
lack of network, and UML support in Linux has been semi-broken for a
long time.  It was also slower than KVM on baremetal in general and had
various corner cases which were much slower including the emulated
serial port which made bulk uploads and downloads painful.  Also of
course it lacked qemu-specific features like qcow2 or any
network-backed disk, so many disk images could not be opened this way.

This was never supported in RHEL.

See-also: https://bugzilla.redhat.com/1144197
This commit is contained in:
Richard W.M. Jones
2014-09-19 13:38:20 +01:00
parent dbc2fd8dc8
commit b9b0a90487
28 changed files with 4 additions and 850 deletions

View File

@@ -302,8 +302,6 @@ check-all:
check-valgrind \
check-direct \
check-valgrind-direct \
check-uml \
check-valgrind-uml \
check-with-upstream-qemu \
check-with-upstream-libvirt \
check-slow
@@ -341,24 +339,6 @@ check-valgrind-direct:
check-valgrind-with-appliance: check-valgrind-direct
# Tests which currently fail under UML:
# - blockdev --setro seems to have no effect on /dev/ubd* devices [*]
# - RHBZ#914931: test is sent a SIGTERM, apparently by UML [*]
# - tests/md/test-inspect-fstab-md.sh hangs at various places during the
# test, eg. running mdadm, mounting MD filesystem [*]
# [*] = likely to be a bug in UML itself
SKIP_TESTS_FAILING_IN_UML = \
SKIP_TEST_BLOCKDEV_GETRO=1 \
SKIP_TEST_BLOCKDEV_SETRO=1 \
SKIP_TEST_RHBZ914931=1 \
SKIP_TEST_INSPECT_FSTAB_MD_SH=1
check-uml:
$(MAKE) LIBGUESTFS_BACKEND=uml $(SKIP_TESTS_FAILING_IN_UML) check
check-valgrind-uml:
$(MAKE) LIBGUESTFS_BACKEND=uml $(SKIP_TESTS_FAILING_IN_UML) check-valgrind
QEMUDIR = $(HOME)/d/qemu
QEMUBINARY = $(QEMUDIR)/x86_64-softmmu/qemu-system-x86_64
@@ -499,8 +479,6 @@ help:
@echo "make check-valgrind Run a subset of the tests under valgrind."
@echo "make check-direct Test using direct backend."
@echo "make check-valgrind-direct Test valgrind + direct backend."
@echo "make check-uml Test using User-Mode Linux."
@echo "make check-valgrind-uml Test valgrind + User-Mode Linux."
@echo "make check-with-upstream-qemu Test using upstream qemu."
@echo "make check-with-upstream-libvirt Test using upstream libvirt."
@echo "make check-slow Slow/long-running tests."

5
TODO
View File

@@ -532,11 +532,6 @@ virt-builder
- /etc/resolv.conf handling works but is best described as a hack:
https://github.com/libguestfs/libguestfs/commit/9521422ce60578f7196cc8b7977d998159238c19
- let's make UML work
+ SLIRP is insecure, but we could allow just a bare web proxy which
gets proxied over virtio-serial to the outside world (except
virtio-serial can't be multiplexed)
- sometimes (not always) aug_init takes ages, why?
Midnight Commander (mc) extension

View File

@@ -313,7 +313,6 @@ lib/inspect-osinfo.c
lib/journal.c
lib/launch-direct.c
lib/launch-libvirt.c
lib/launch-uml.c
lib/launch.c
lib/libvirt-auth.c
lib/libvirt-domain.c

View File

@@ -271,10 +271,6 @@ Optional. Used only for testing.
Optional. qemu-nbd is used for testing.
=item uml_mkcow
Optional. For the L<UML backend|guestfs(3)/BACKEND>.
=item curl
Optional. Used by virt-builder for downloads.

View File

@@ -649,21 +649,6 @@ using C<./configure --with-default-backend=...>
Run a subset of the test suite under valgrind using the
default appliance back-end.
=item C<make check-uml>
Runs all tests using the User-Mode Linux backend.
As there is no standard location for the User-Mode Linux kernel, you
I<have> to set C<LIBGUESTFS_HV> to point to the kernel image, eg:
make check-uml LIBGUESTFS_HV=~/d/linux-um/vmlinux
=item C<make check-valgrind-uml>
Runs all tests using the User-Mode Linux backend, under valgrind.
As above, you have to set C<LIBGUESTFS_HV> to point to the kernel.
=item C<make check-with-upstream-qemu>
Runs all tests using a local qemu binary. It looks for the qemu

View File

@@ -365,43 +365,6 @@ L<http://rwmj.wordpress.com/2013/02/25/multiple-libguestfs-appliances-in-paralle
printf ("%d %.2f\n", $nr_threads, $end_t - $start_t);
}
=head1 USING USER-MODE LINUX
Since libguestfs 1.24, it has been possible to use the User-Mode Linux
(uml) backend instead of KVM
(see L<guestfs(3)/USER-MODE LINUX BACKEND>). This section makes some
general remarks about this backend, but it is B<highly advisable> to
measure your own workload under UML rather than trusting comments or
intuition.
=over 4
=item *
UML usually performs the same or slightly slower than KVM, on baremetal.
=item *
However UML often performs the same under virtualization as it does on
baremetal, whereas KVM can run much slower under virtualization (since
hardware virt acceleration is not available).
=item *
Upload and download is as much as 10 times slower on UML than KVM.
Libguestfs sends this data over the UML emulated serial port, which is
far less efficient than KVM's virtio-serial.
=item *
UML lacks some features (eg. qcow2 support), so it may not be
applicable at all.
=back
For some actual figures, see:
L<http://rwmj.wordpress.com/2013/08/14/performance-of-user-mode-linux-as-a-libguestfs-backend/#content>
=head1 TROUBLESHOOTING POOR PERFORMANCE
=head2 Ensure hardware virtualization is available

View File

@@ -25,8 +25,6 @@ $TEST_FUNCTIONS
skip_if_skipped "test-fuse.sh"
skip_if_skipped
skip_unless_phony_guest fedora.img
# UML backend does not support qcow2.
skip_if_backend uml
skip_unless_fuse
rm -f test.qcow2 test-copy.qcow2 test.pid

View File

@@ -32,8 +32,7 @@ let non_daemon_functions = [
longdesc = "\
Set the hypervisor binary that we will use. The hypervisor
depends on the backend, but is usually the location of the
qemu/KVM hypervisor. For the uml backend, it is the location
of the C<linux> or C<vmlinux> binary.
qemu/KVM hypervisor.
The default is chosen when the library was compiled by the
configure script.

View File

@@ -97,7 +97,6 @@ libguestfs_la_SOURCES = \
launch.c \
launch-direct.c \
launch-libvirt.c \
launch-uml.c \
libvirt-auth.c \
libvirt-domain.c \
lpj.c \

View File

@@ -281,21 +281,7 @@ build_supermin_appliance (guestfs_h *g,
/* Touch the files so they don't get deleted (as they are in /var/tmp). */
(void) utimes (appliance->kernel, NULL);
(void) utimes (appliance->initrd, NULL);
/* Checking backend != "uml" is a big hack. UML encodes the mtime
* of the original backing file (in this case, the appliance) in the
* COW file, and checks it when adding it to the VM. If there are
* multiple threads running and one touches the appliance here, it
* will disturb the mtime and UML will give an error.
*
* We can get rid of this hack as soon as UML fixes the
* ubdN=cow,original parsing bug, since we won't need to run
* uml_mkcow separately, so there is no possible race.
*
* XXX
*/
if (STRNEQ (g->backend, "uml"))
(void) utimes (appliance->image, NULL);
(void) utimes (appliance->image, NULL);
return 0;
}

View File

@@ -292,7 +292,7 @@ struct drive {
* it is non-NULL, else consult the original source above.
*
* Note that the overlay is in a backend-specific format, probably
* different from the source format. eg. qcow2, UML COW.
* different from the source format. eg. qcow2
*/
char *overlay;
@@ -812,7 +812,6 @@ void guestfs_int_init_direct_backend (void) __attribute__((constructor));
#ifdef HAVE_LIBVIRT_BACKEND
void guestfs_int_init_libvirt_backend (void) __attribute__((constructor));
#endif
void guestfs_int_init_uml_backend (void) __attribute__((constructor));
/* qemu.c */
struct qemu_data;

View File

@@ -1481,17 +1481,6 @@ URI would be C<libvirt:qemu:///session>
The libvirt backend supports more features, including
hotplugging (see L</HOTPLUGGING>) and sVirt.
=item C<uml>
Run the User-Mode Linux kernel. The location of the kernel is set
using C<$LIBGUESTFS_HV> or using the L</guestfs_set_qemu> API (note
that qemu is not involved, we just reuse the same variable in the
handle for convenience).
User-Mode Linux can be much faster, simpler and more lightweight than
using a full-blown virtual machine, but it also has some shortcomings.
See L</USER-MODE LINUX BACKEND> below.
=back
C<direct> is usually the default backend. However since libguestfs
@@ -1553,84 +1542,6 @@ On Fedora, install C<kernel-debuginfo> for the C<vmlinux> file
(containing symbols). Make sure the symbols precisely match the
kernel being used.
=head2 USER-MODE LINUX BACKEND
Setting the following environment variables (or the equivalent in the
API) selects the User-Mode Linux backend:
export LIBGUESTFS_BACKEND=uml
export LIBGUESTFS_HV=/path/to/vmlinux
C<vmlinux> (or it may be called C<linux>) is the Linux binary,
compiled to run as a userspace process. Note that we reuse the qemu
variable in the handle for convenience; qemu is not involved.
User-Mode Linux can be faster and more lightweight than running a
full-blown virtual machine as the backend (especially if you are
already running libguestfs in a virtual machine or cloud instance),
but it also has some shortcomings compared to the usual qemu/KVM-based
backend.
=head3 BUILDING USER-MODE LINUX FROM SOURCE
Your Linux distro may provide UML in which case you can ignore this
section.
These instructions are adapted from:
L<http://user-mode-linux.sourceforge.net/source.html>
=over 4
=item 1. Check out Linux sources
Clone the Linux git repository or download the Linux source tarball.
=item 2. Configure the kernel
B<Note:> All make commands must have C<ARCH=um> added.
make menuconfig ARCH=um
Make sure any filesystem drivers that you need are compiled into the
kernel.
B<Currently, it needs a large amount of extra work to get modules
working>. It's recommended that you disable module support in the
kernel configuration, which will cause everything to be compiled into
the image.
=item 3. Build the kernel
make ARCH=um
This will leave a file called C<linux> or C<vmlinux> in the top-level
directory. This is the UML kernel. You should set C<LIBGUESTFS_HV>
to point to this file.
=back
=head3 USER-MODE LINUX DIFFERENCES FROM KVM
=over 4
=item UML only supports raw-format images
Only plain raw-format images will work. No qcow2, no backing files.
=item UML does not support any remote drives
No NBD, etc.
=item UML only works on ix86 and x86-64
=item UML is experimental
In particular, support for UML in libguestfs depends on support for
UML in the upstream kernel. If UML was ever removed from the upstream
Linux kernel, then we might remove it from libguestfs too.
=back
=head2 ABI GUARANTEE
We guarantee the libguestfs ABI (binary interface), for public,

View File

@@ -1,607 +0,0 @@
/* libguestfs
* Copyright (C) 2009-2020 Red Hat Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/signal.h>
#include <libintl.h>
#include "cloexec.h"
#include "guestfs.h"
#include "guestfs-internal.h"
#include "guestfs_protocol.h"
/* Per-handle data. */
struct backend_uml_data {
pid_t pid; /* vmlinux PID. */
pid_t recoverypid; /* Recovery process PID. */
#define UML_UMID_LEN 16
char umid[UML_UMID_LEN+1]; /* umid=<...> unique ID. */
};
static void print_vmlinux_command_line (guestfs_h *g, char **argv);
/* Run uml_mkcow to create a COW overlay. */
static char *
make_cow_overlay (guestfs_h *g, const char *original)
{
CLEANUP_CMD_CLOSE struct command *cmd = guestfs_int_new_command (g);
char *overlay;
int r;
overlay = guestfs_int_make_temp_path (g, "overlay", "qcow2");
if (!overlay)
return NULL;
guestfs_int_cmd_add_arg (cmd, "uml_mkcow");
guestfs_int_cmd_add_arg (cmd, overlay);
guestfs_int_cmd_add_arg (cmd, original);
r = guestfs_int_cmd_run (cmd);
if (r == -1) {
free (overlay);
return NULL;
}
if (!WIFEXITED (r) || WEXITSTATUS (r) != 0) {
guestfs_int_external_command_failed (g, r, "uml_mkcow", original);
free (overlay);
return NULL;
}
return overlay;
}
static char *
create_cow_overlay_uml (guestfs_h *g, void *datav, struct drive *drv)
{
return make_cow_overlay (g, drv->src.u.path);
}
/* Test for features which are not supported by the UML backend.
* Possibly some of these should just be warnings, not errors.
*/
static bool
uml_supported (guestfs_h *g)
{
size_t i;
struct drive *drv;
if (g->enable_network) {
error (g, _("uml backend does not support networking"));
return false;
}
if (g->smp > 1) {
error (g, _("uml backend does not support SMP"));
return false;
}
ITER_DRIVES (g, i, drv) {
if (drv->src.protocol != drive_protocol_file) {
error (g, _("uml backend does not support remote drives"));
return false;
}
if (drv->src.format && STRNEQ (drv->src.format, "raw")) {
error (g, _("uml backend does not support non-raw-format drives"));
return false;
}
if (drv->iface) {
error (g,
_("uml backend does not support drives with iface parameter"));
return false;
}
if (drv->disk_label) {
error (g,
_("uml backend does not support drives with label parameter"));
return false;
}
/* Note that discard == "besteffort" is fine. */
if (drv->discard == discard_enable) {
error (g,
_("uml backend does not support drives with discard parameter set to enable"));
return false;
}
if (drv->blocksize) {
error (g,
_("uml backend does not support drives with blocksize parameter"));
return false;
}
}
return true;
}
static int
launch_uml (guestfs_h *g, void *datav, const char *arg)
{
struct backend_uml_data *data = datav;
CLEANUP_FREE_STRINGSBUF DECLARE_STRINGSBUF (cmdline);
int console_sock = -1, daemon_sock = -1;
int r;
int csv[2], dsv[2];
CLEANUP_FREE char *kernel = NULL, *initrd = NULL, *appliance = NULL;
int has_appliance_drive;
CLEANUP_FREE char *appliance_cow = NULL;
uint32_t size;
CLEANUP_FREE void *buf = NULL;
struct drive *drv;
size_t i;
struct hv_param *hp;
char *term = getenv ("TERM");
if (!uml_supported (g))
return -1;
if (!g->nr_drives) {
error (g, _("you must call guestfs_add_drive before guestfs_launch"));
return -1;
}
/* Assign a random unique ID to this run. */
if (guestfs_int_random_string (data->umid, UML_UMID_LEN) == -1) {
perrorf (g, "guestfs_int_random_string");
return -1;
}
/* Locate and/or build the appliance. */
if (guestfs_int_build_appliance (g, &kernel, &initrd, &appliance) == -1)
return -1;
has_appliance_drive = appliance != NULL;
/* Create COW overlays for the appliance. Note that the documented
* syntax ubd0=cow,orig does not work since kernel 3.3. See:
* http://thread.gmane.org/gmane.linux.uml.devel/13556
*/
if (has_appliance_drive) {
appliance_cow = make_cow_overlay (g, appliance);
if (!appliance_cow)
goto cleanup0;
}
/* The socket that the daemon will talk to us on.
*/
if (socketpair (AF_LOCAL, SOCK_STREAM|SOCK_CLOEXEC, 0, dsv) == -1) {
perrorf (g, "socketpair");
goto cleanup0;
}
/* The console socket. */
if (!g->direct_mode) {
if (socketpair (AF_LOCAL, SOCK_STREAM|SOCK_CLOEXEC, 0, csv) == -1) {
perrorf (g, "socketpair");
close (dsv[0]);
close (dsv[1]);
goto cleanup0;
}
}
/* Construct the vmlinux command line. We have to do this before
* forking, because after fork we are not allowed to use
* non-signal-safe functions such as malloc.
*/
#define ADD_CMDLINE(str) \
guestfs_int_add_string (g, &cmdline, (str))
#define ADD_CMDLINE_PRINTF(fs,...) \
guestfs_int_add_sprintf (g, &cmdline, (fs), ##__VA_ARGS__)
ADD_CMDLINE (g->hv);
/* Give this instance a unique random ID. */
ADD_CMDLINE_PRINTF ("umid=%s", data->umid);
/* Set memory size. */
ADD_CMDLINE_PRINTF ("mem=%dM", g->memsize);
/* vmlinux appears to ignore this, but let's add it anyway. */
ADD_CMDLINE_PRINTF ("initrd=%s", initrd);
/* Make sure our appliance init script runs first. */
ADD_CMDLINE ("init=/init");
/* This tells the /init script not to reboot at the end. */
ADD_CMDLINE ("guestfs_noreboot=1");
/* Root filesystem should be mounted read-write (default seems to
* be "ro").
*/
ADD_CMDLINE ("rw");
/* See also guestfs_int_appliance_command_line. */
if (g->verbose)
ADD_CMDLINE ("guestfs_verbose=1");
ADD_CMDLINE ("panic=1");
ADD_CMDLINE_PRINTF ("TERM=%s", term ? term : "linux");
if (g->selinux)
ADD_CMDLINE ("selinux=1 enforcing=0");
else
ADD_CMDLINE ("selinux=0");
/* XXX This isn't quite right. Multiple append args won't work. */
if (g->append)
ADD_CMDLINE (g->append);
/* Add the drives. */
ITER_DRIVES (g, i, drv) {
if (!drv->overlay)
ADD_CMDLINE_PRINTF ("ubd%zu=%s", i, drv->src.u.path);
else
ADD_CMDLINE_PRINTF ("ubd%zu=%s", i, drv->overlay);
}
/* Add the ext2 appliance drive (after all the drives). */
if (has_appliance_drive) {
char drv_name[64] = "ubd";
guestfs_int_drive_name (g->nr_drives, &drv_name[3]);
ADD_CMDLINE_PRINTF ("ubd%zu=%s", g->nr_drives, appliance_cow);
ADD_CMDLINE_PRINTF ("root=/dev/%s", drv_name);
}
/* Create the daemon socket. */
ADD_CMDLINE_PRINTF ("ssl3=fd:%d", dsv[1]);
ADD_CMDLINE ("guestfs_channel=/dev/ttyS3");
/* Add any vmlinux parameters. */
for (hp = g->hv_params; hp; hp = hp->next) {
ADD_CMDLINE (hp->hv_param);
if (hp->hv_value)
ADD_CMDLINE (hp->hv_value);
}
/* Finish off the command line. */
guestfs_int_end_stringsbuf (g, &cmdline);
r = fork ();
if (r == -1) {
perrorf (g, "fork");
if (!g->direct_mode) {
close (csv[0]);
close (csv[1]);
}
close (dsv[0]);
close (dsv[1]);
goto cleanup0;
}
if (r == 0) { /* Child (vmlinux). */
/* Set up the daemon socket for the child. */
close (dsv[0]);
set_cloexec_flag (dsv[1], 0); /* so it doesn't close across exec */
if (!g->direct_mode) {
/* Set up stdin, stdout, stderr. */
close (0);
close (1);
close (csv[0]);
/* We set the FD_CLOEXEC flag on the socket above, but now (in
* the child) it's safe to unset this flag so vmlinux can use the
* socket.
*/
set_cloexec_flag (csv[1], 0);
/* Stdin. */
if (dup (csv[1]) == -1) {
dup_failed:
perror ("dup failed");
_exit (EXIT_FAILURE);
}
/* Stdout. */
if (dup (csv[1]) == -1)
goto dup_failed;
/* Send stderr to the pipe as well. */
close (2);
if (dup (csv[1]) == -1)
goto dup_failed;
close (csv[1]);
/* RHBZ#1123007 */
close_file_descriptors (fd > 2 && fd != dsv[1]);
}
/* RHBZ#1460338. */
guestfs_int_unblock_sigterm ();
/* Dump the command line (after setting up stderr above). */
if (g->verbose)
print_vmlinux_command_line (g, cmdline.argv);
/* Put vmlinux in a new process group. */
if (g->pgroup)
setpgid (0, 0);
setenv ("LC_ALL", "C", 1);
execv (g->hv, cmdline.argv); /* Run vmlinux. */
perror (g->hv);
_exit (EXIT_FAILURE);
}
/* Parent (library). */
data->pid = r;
/* Fork the recovery process off which will kill vmlinux if the
* parent process fails to do so (eg. if the parent segfaults).
*/
data->recoverypid = -1;
if (g->recovery_proc) {
r = fork ();
if (r == 0) {
struct sigaction sa;
pid_t vmlinux_pid = data->pid;
pid_t parent_pid = getppid ();
/* Remove all signal handlers. See the justification here:
* https://www.redhat.com/archives/libvir-list/2008-August/msg00303.html
* We don't mask signal handlers yet, so this isn't completely
* race-free, but better than not doing it at all.
*/
memset (&sa, 0, sizeof sa);
sa.sa_handler = SIG_DFL;
sa.sa_flags = 0;
sigemptyset (&sa.sa_mask);
for (i = 1; i < NSIG; ++i)
sigaction (i, &sa, NULL);
/* Close all other file descriptors. This ensures that we don't
* hold open (eg) pipes from the parent process.
*/
close_file_descriptors (1);
/* RHBZ#1460338 */
guestfs_int_unblock_sigterm ();
/* It would be nice to be able to put this in the same process
* group as vmlinux (ie. setpgid (0, vmlinux_pid)). However
* this is not possible because we don't have any guarantee here
* that the vmlinux process has started yet.
*/
if (g->pgroup)
setpgid (0, 0);
/* Writing to argv is hideously complicated and error prone. See:
* http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/misc/ps_status.c;hb=HEAD
*/
/* Loop around waiting for one or both of the other processes to
* disappear. It's fair to say this is very hairy. The PIDs that
* we are looking at might be reused by another process. We are
* effectively polling. Is the cure worse than the disease?
*/
for (;;) {
if (kill (vmlinux_pid, 0) == -1)
/* vmlinux's gone away, we aren't needed */
_exit (EXIT_SUCCESS);
if (kill (parent_pid, 0) == -1) {
/* Parent's gone away, vmlinux still around, so kill vmlinux. */
kill (data->pid, SIGKILL);
_exit (EXIT_SUCCESS);
}
sleep (2);
}
}
/* Don't worry, if the fork failed, this will be -1. The recovery
* process isn't essential.
*/
data->recoverypid = r;
}
if (!g->direct_mode) {
/* Close the other end of the console socketpair. */
close (csv[1]);
console_sock = csv[0]; /* stdin of child */
csv[0] = -1;
}
daemon_sock = dsv[0];
close (dsv[1]);
dsv[0] = -1;
g->state = LAUNCHING;
/* Wait for vmlinux to start and to connect back to us via
* virtio-serial and send the GUESTFS_LAUNCH_FLAG message.
*/
g->conn =
guestfs_int_new_conn_socket_connected (g, daemon_sock, console_sock);
if (!g->conn)
goto cleanup1;
/* g->conn now owns these sockets. */
daemon_sock = console_sock = -1;
/* We now have to wait for vmlinux to start up, the daemon to start
* running, and for it to send the GUESTFS_LAUNCH_FLAG to us.
*/
r = guestfs_int_recv_from_daemon (g, &size, &buf);
if (r == -1) {
guestfs_int_launch_failed_error (g);
goto cleanup1;
}
if (size != GUESTFS_LAUNCH_FLAG) {
guestfs_int_launch_failed_error (g);
goto cleanup1;
}
debug (g, "appliance is up");
/* This is possible in some really strange situations, such as
* guestfsd starts up OK but then vmlinux immediately exits. Check
* for it because the caller is probably expecting to be able to
* send commands after this function returns.
*/
if (g->state != READY) {
error (g, _("vmlinux launched and contacted daemon, but state != READY"));
goto cleanup1;
}
if (has_appliance_drive)
guestfs_int_add_dummy_appliance_drive (g);
return 0;
cleanup1:
if (!g->direct_mode && csv[0] >= 0)
close (csv[0]);
if (dsv[0] >= 0)
close (dsv[0]);
if (data->pid > 0) kill (data->pid, SIGKILL);
if (data->recoverypid > 0) kill (data->recoverypid, SIGKILL);
if (data->pid > 0) guestfs_int_waitpid_noerror (data->pid);
if (data->recoverypid > 0) guestfs_int_waitpid_noerror (data->recoverypid);
data->pid = 0;
data->recoverypid = 0;
memset (&g->launch_t, 0, sizeof g->launch_t);
cleanup0:
if (daemon_sock >= 0)
close (daemon_sock);
if (console_sock >= 0)
close (console_sock);
if (g->conn) {
g->conn->ops->free_connection (g, g->conn);
g->conn = NULL;
}
g->state = CONFIG;
return -1;
}
/* This is called from the forked subprocess just before vmlinux runs,
* so it can just print the message straight to stderr, where it will
* be picked up and funnelled through the usual appliance event API.
*/
static void
print_vmlinux_command_line (guestfs_h *g, char **argv)
{
size_t i = 0;
int needs_quote;
struct timeval tv;
gettimeofday (&tv, NULL);
fprintf (stderr, "[%05" PRIi64 "ms] ",
guestfs_int_timeval_diff (&g->launch_t, &tv));
while (argv[i]) {
if (i > 0) fputc (' ', stderr);
/* Does it need shell quoting? This only deals with simple cases. */
needs_quote = strcspn (argv[i], " ") != strlen (argv[i]);
if (needs_quote) fputc ('\'', stderr);
fprintf (stderr, "%s", argv[i]);
if (needs_quote) fputc ('\'', stderr);
i++;
}
fputc ('\n', stderr);
}
static int
shutdown_uml (guestfs_h *g, void *datav, int check_for_errors)
{
struct backend_uml_data *data = datav;
int ret = 0;
int status;
/* Signal vmlinux to shutdown cleanly, and kill the recovery process. */
if (data->pid > 0) {
debug (g, "sending SIGTERM to process %d", data->pid);
kill (data->pid, SIGTERM);
}
if (data->recoverypid > 0) kill (data->recoverypid, 9);
/* Wait for subprocess(es) to exit. */
if (data->pid > 0) {
if (guestfs_int_waitpid (g, data->pid, &status, "vmlinux") == -1)
ret = -1;
/* Note it's normal for the pre-3.11 vmlinux process to exit with
* status "killed by signal 15" (where 15 == SIGTERM). Post 3.11
* the exit status can normally be 1.
*
* So don't consider those to be an error.
*/
else if (!(WIFSIGNALED (status) && WTERMSIG (status) == SIGTERM) &&
!(WIFEXITED (status) && WEXITSTATUS (status) == 0) &&
!(WIFEXITED (status) && WEXITSTATUS (status) == 1)) {
guestfs_int_external_command_failed (g, status, g->hv, NULL);
ret = -1;
}
}
if (data->recoverypid > 0) guestfs_int_waitpid_noerror (data->recoverypid);
data->pid = data->recoverypid = 0;
return ret;
}
static int
get_pid_uml (guestfs_h *g, void *datav)
{
struct backend_uml_data *data = datav;
if (data->pid > 0)
return data->pid;
else {
error (g, "get_pid: no vmlinux subprocess");
return -1;
}
}
/* UML appears to use a single major, and puts ubda at minor 0 with
* each partition at minors 1-15, ubdb at minor 16, etc. So the
* maximum is 256/16 = 16. However one disk is used by the appliance,
* so it's one less than this. I tested both 15 & 16 disks, and found
* that 15 worked and 16 failed.
*/
static int
max_disks_uml (guestfs_h *g, void *datav)
{
return 15;
}
static struct backend_ops backend_uml_ops = {
.data_size = sizeof (struct backend_uml_data),
.create_cow_overlay = create_cow_overlay_uml,
.launch = launch_uml,
.shutdown = shutdown_uml,
.get_pid = get_pid_uml,
.max_disks = max_disks_uml,
};
void
guestfs_int_init_uml_backend (void)
{
guestfs_int_register_backend ("uml", &backend_uml_ops);
}

View File

@@ -284,14 +284,6 @@ guestfs_impl_config (guestfs_h *g,
{
struct hv_param *hp;
/*
XXX For qemu this made sense, but not for uml.
if (hv_param[0] != '-') {
error (g, _("parameter must begin with '-' character"));
return -1;
}
*/
/* A bit fascist, but the user will probably break the extra
* parameters that we add if they try to set any of these.
*/
@@ -431,5 +423,4 @@ guestfs_int_force_load_backends[] = {
#ifdef HAVE_LIBVIRT_BACKEND
guestfs_int_init_libvirt_backend,
#endif
guestfs_int_init_uml_backend,
};

View File

@@ -73,7 +73,6 @@ main (int argc, char *argv[])
virDomainPtr dom;
virErrorPtr err;
int r;
char *backend;
char cwd[1024];
FILE *fp;
char libvirt_uri[sizeof cwd + 64];
@@ -86,13 +85,6 @@ main (int argc, char *argv[])
if (g == NULL)
error (EXIT_FAILURE, errno, "guestfs_create");
backend = guestfs_get_backend (g);
if (STREQ (backend, "uml")) {
free (backend);
error (77, 0, "test skipped because UML backend does not support qcow2");
}
free (backend);
/* Create the libvirt XML and test images in the current
* directory.
*/

View File

@@ -25,11 +25,6 @@ use Sys::Guestfs;
exit 77 if $ENV{SKIP_TEST_DISK_LABELS_PL};
if (Sys::Guestfs->new()->get_backend() eq "uml") {
print "$0: test skipped because UML backend does not support disk labels\n";
exit 77
}
my $g = Sys::Guestfs->new ();
# Add two drives.

View File

@@ -24,7 +24,6 @@ set -e
$TEST_FUNCTIONS
skip_because "device name hints are broken" ;# XXX Fix before 1.38
skip_if_skipped
skip_if_backend uml
skip_unless_phony_guest fedora.img
canonical="sed -r s,/dev/[abce-ln-z]+d,/dev/sd,g"

View File

@@ -20,7 +20,6 @@ set -e
$TEST_FUNCTIONS
skip_if_skipped
skip_if_backend uml
skip_unless_feature_available btrfs
canonical="sed s,/dev/vd,/dev/sd,g"

View File

@@ -27,11 +27,6 @@ END { kill 15, $pid if $pid > 0 };
exit 77 if $ENV{SKIP_TEST_NBD_PL};
if (Sys::Guestfs->new()->get_backend() eq "uml") {
print "$0: test skipped because UML backend does not support NBD\n";
exit 77
}
# Check we have qemu-nbd.
if (system ("qemu-nbd --help >/dev/null 2>&1") != 0) {
print "$0: test skipped because qemu-nbd program not found\n";

View File

@@ -24,6 +24,5 @@ set -e
$TEST_FUNCTIONS
skip_if_skipped
skip_if_backend uml
guestfish --network -a /dev/null run

View File

@@ -20,8 +20,6 @@
$TEST_FUNCTIONS
skip_if_skipped
# Only applicable if the backend uses qemu.
skip_if_backend uml
set -e

View File

@@ -24,8 +24,6 @@ set -e
$TEST_FUNCTIONS
skip_if_skipped
# UML backend doesn't support qcow2 format.
skip_if_backend uml
f=isolation-qcow2.img
rm -f $f

View File

@@ -24,7 +24,6 @@ set -e
$TEST_FUNCTIONS
skip_if_skipped
skip_if_backend uml
guestfish <<EOF
-add-domain rhbz1370424 \

View File

@@ -31,8 +31,6 @@ skip_if_arch ppc64
skip_if_arch ppc64le
skip_if_arch s390x
skip_if_backend libvirt
# UML doesn't support the 'iface' parameter.
skip_if_backend uml
rm -f rhbz690819.img

View File

@@ -31,8 +31,6 @@ skip_if_arch ppc64
skip_if_arch ppc64le
skip_if_arch s390x
skip_if_backend libvirt
# UML doesn't support the 'iface' parameter.
skip_if_backend uml
rm -f rhbz975797-*.img

View File

@@ -21,10 +21,6 @@ set -e
$TEST_FUNCTIONS
skip_if_skipped
# UML doesn't support qcow2. Conceivably there might be a similar
# problem with UML COW images which would require a separate test.
skip_if_backend uml
rm -f backing*
rm -f overlay*
rm -f link*

View File

@@ -44,10 +44,6 @@ case "$backend" in
echo "$0: skipping test because host firewall will probably prevent this test from working"
exit 77
;;
uml)
echo "$0: skipping test because networking is not available in the UML backend"
exit 77
;;
*)
echo "$0: don't know how to get IP address of backend $backend"
exit 77

View File

@@ -54,7 +54,7 @@ skip_if_skipped ()
}
# Skip if the current libguestfs backend is $1.
# eg. skip_if_backend uml
# eg. skip_if_backend libvirt
skip_if_backend ()
{
local b="$(guestfish get-backend)"